#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_36.V.54.182.h"
//#define PDUMP
#define PVRSRV_NEED_PVR_DPF
-#define PVRSRV_NEED_PVR_ASSERT
+//#define PVRSRV_NEED_PVR_ASSERT
#define SUPPORT_RGXTQ_BRIDGE
#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9
#define PVRSRV_POISON_ON_FREE_VALUE 0x63
#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048
#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50
-#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP 0
+
#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME 0
#define PVRSRV_APPHINT_JONESDISABLEMASK 0
#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
#define SUPPORT_AGP
#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 1000000
#define PVR_ANNOTATION_MAX_LEN 96
-#define PVRSRV_DEVICE_INIT_MODE PVRSRV_LINUX_DEV_INIT_ON_OPEN
+#define PVRSRV_DEVICE_INIT_MODE PVRSRV_LINUX_DEV_INIT_ON_PROBE
#define SUPPORT_DI_BRG_IMPL
#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17
#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM 15
#define SUPPORT_BUFFER_SYNC 1
+
+#define DRIVER0_SECURITY_SUPPORT 0
+#define DRIVER1_SECURITY_SUPPORT 0
+#define DRIVER2_SECURITY_SUPPORT 0
+#define DRIVER3_SECURITY_SUPPORT 0
+#define DRIVER4_SECURITY_SUPPORT 0
+#define DRIVER5_SECURITY_SUPPORT 0
+#define DRIVER6_SECURITY_SUPPORT 0
+#define DRIVER7_SECURITY_SUPPORT 0
+
+#define PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2 7
+#define PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2 8
+
+#define PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 11
+#define PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES 10000
+
+#define PVRSRV_APPHINT_ENABLESPUCLOCKGATING IMG_FALSE
+#define PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION 0
+
+#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP 1
+
+#define PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC 1
+
+#define RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_0_DEFAULT_PRIORITY (1 - 0)
+#define RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_1_DEFAULT_PRIORITY (1 - 1)
+#define RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_2_DEFAULT_PRIORITY (1 - 2)
+#define RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_3_DEFAULT_PRIORITY (1 - 3)
+#define RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_4_DEFAULT_PRIORITY (1 - 4)
+#define RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_5_DEFAULT_PRIORITY (1 - 5)
+#define RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_6_DEFAULT_PRIORITY (1 - 6)
+#define RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP 0
+#define RGX_DRIVERID_7_DEFAULT_PRIORITY (1 - 7)
+
+#define RGX_FW_HEAP_OSID_ASSIGNMENT RGX_FW_HEAP_USES_FIRMWARE_OSID
+#define RGX_FW_HEAP_USES_DEDICATED_OSID 2
+#define RGX_FW_HEAP_USES_FIRMWARE_OSID 0
+#define RGX_FW_HEAP_USES_HOST_OSID 1
+#define RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION 512
+
+#define RGX_NUM_DRIVERS_SUPPORTED 1
+#define RGX_VZ_CONNECTION_TIMEOUT_US 60000000
+#define SUPPORT_RGXKICKSYNC_BRIDGE
override SUPPORT_DMA_FENCE := 1
override SUPPORT_BUFFER_SYNC := 1
override DEFINE_X86_FEATURE_LA57 := 1
+override DEFINE_X86_FEATURE_LA57 := 1
+
+override RGX_NUM_OS_SUPPORTED := 1
+override RGX_FW_HEAP_OSID_ASSIGNMENT := RGX_FW_HEAP_USES_FIRMWARE_OSID
+override RGX_NUM_DRIVERS_SUPPORTED := 1
+override SUPPORT_RGXKICKSYNC_BRIDGE := 1
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psPMRInt[i])
+ if (psPMRInt && psPMRInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hPMRInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
#include "img_defs.h"
#include "devicemem_typedefs.h"
+#include "pmr.h"
#include "devicemem_history_server.h"
IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
#include "img_defs.h"
+#include "pmr.h"
#include "devicemem_history_server.h"
#include "common_devicememhistory_bridge.h"
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
"DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
static IMG_INT
PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32FreePageCount *
sizeof(IMG_UINT32)) + 0;
+ if (unlikely
+ (psDevicememHistorySparseChangeIN->ui32AllocPageCount >
+ PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DevicememHistorySparseChange_exit;
+ }
+
+ if (unlikely
+ (psDevicememHistorySparseChangeIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DevicememHistorySparseChange_exit;
+ }
+
if (ui64BufferSize > IMG_UINT32_MAX)
{
psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
return eError;
}
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psDICreateContextIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psPMRInt[i])
+ if (psPMRInt && psPMRInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hPMRInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
}
+static_assert(32 <= IMG_UINT32_MAX, "32 must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psDmaSparseMappingTableIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
return eError;
}
-static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
- "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
"DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
if (unlikely
- (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
+ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
{
psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
goto PhysmemImportSparseDmaBuf_exit;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32EnablePID,
IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode);
-IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PID,
- IMG_UINT32 ui32TID,
- IMG_UINT64 ui64TimeStamp,
- IMG_UINT32 ui32SF,
- IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args);
-
#endif /* CLIENT_HTBUFFER_BRIDGE_H */
return eError;
}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PID,
- IMG_UINT32 ui32TID,
- IMG_UINT64 ui64TimeStamp,
- IMG_UINT32 ui32SF,
- IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args)
-{
- PVRSRV_ERROR eError;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args);
-
- return eError;
-}
#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0
#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
-#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1
-#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0)
/*******************************************
HTBControl
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL;
-/*******************************************
- HTBLog
- *******************************************/
-
-/* Bridge in structure for HTBLog */
-typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
-{
- IMG_UINT64 ui64TimeStamp;
- IMG_UINT32 *pui32Args;
- IMG_UINT32 ui32NumArgs;
- IMG_UINT32 ui32PID;
- IMG_UINT32 ui32SF;
- IMG_UINT32 ui32TID;
-} __packed PVRSRV_BRIDGE_IN_HTBLOG;
-
-/* Bridge out structure for HTBLog */
-typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_HTBLOG;
-
#endif /* COMMON_HTBUFFER_BRIDGE_H */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
- if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
- OSFreeMemNoStats(pArrayArgsBuffer);
-
- return 0;
-}
-
-static_assert(HTB_LOG_MAX_PARAMS <= IMG_UINT32_MAX,
- "HTB_LOG_MAX_PARAMS must not be larger than IMG_UINT32_MAX");
-
-static IMG_INT
-PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psHTBLogIN_UI8,
- IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN =
- (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT =
- (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0);
-
- IMG_UINT32 *ui32ArgsInt = NULL;
-
- IMG_UINT32 ui32NextOffset = 0;
- IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
- IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
-
- IMG_UINT32 ui32BufferSize = 0;
- IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0;
-
- if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS))
- {
- psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto HTBLog_exit;
- }
-
- PVR_UNREFERENCED_PARAMETER(psConnection);
-
- if (ui64BufferSize > IMG_UINT32_MAX)
- {
- psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
- goto HTBLog_exit;
- }
-
- ui32BufferSize = (IMG_UINT32) ui64BufferSize;
-
- if (ui32BufferSize != 0)
- {
-#if !defined(INTEGRITY_OS)
- /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
- IMG_UINT32 ui32InBufferOffset =
- PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
- IMG_UINT32 ui32InBufferExcessSize =
- ui32InBufferOffset >=
- PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
-
- bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
- if (bHaveEnoughSpace)
- {
- IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN;
-
- pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
- }
- else
-#endif
- {
- pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
-
- if (!pArrayArgsBuffer)
- {
- psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto HTBLog_exit;
- }
- }
- }
-
- if (psHTBLogIN->ui32NumArgs != 0)
- {
- ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
- }
-
- /* Copy the data over */
- if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
- {
- if (OSCopyFromUser
- (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args,
- psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK)
- {
- psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
-
- goto HTBLog_exit;
- }
- }
-
- psHTBLogOUT->eError =
- HTBLogKM(psHTBLogIN->ui32PID,
- psHTBLogIN->ui32TID,
- psHTBLogIN->ui64TimeStamp,
- psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt);
-
-HTBLog_exit:
-
- /* Allocated space should be equal to the last updated offset */
-#ifdef PVRSRV_NEED_PVR_ASSERT
- if (psHTBLogOUT->eError == PVRSRV_OK)
- PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-#endif /* PVRSRV_NEED_PVR_ASSERT */
-
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL,
PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock);
- SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG,
- PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock);
-
return PVRSRV_OK;
}
UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG);
-
}
#else /* EXCLUDE_HTBUFFER_BRIDGE */
/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined,
IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 * pui32MappingTable,
IMG_UINT32 ui32PDumpFlags,
PVRSRV_MEMALLOCFLAGS_T * puiOutFlags);
-IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
- IMG_UINT32 ui32NumPhysChunks,
- IMG_UINT32 ui32NumVirtChunks,
- IMG_UINT32 * pui32MappingTable,
- IMG_UINT32 ui32Log2PageSize,
- PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_UINT32 ui32AnnotationLength,
- const IMG_CHAR * puiAnnotation,
- IMG_PID ui32PID,
- IMG_HANDLE * phPMRPtr,
- IMG_UINT32 ui32PDumpFlags,
- PVRSRV_MEMALLOCFLAGS_T * puiOutFlags);
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
- IMG_HANDLE hMapping, IMG_HANDLE hPMR);
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
- IMG_HANDLE hMapping, IMG_HANDLE hPMR);
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
IMG_BOOL bbKernelMemoryCtx,
IMG_HANDLE * phDevMemServerContext,
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
IMG_DEV_VIRTADDR sHeapBaseAddr,
- IMG_DEVMEM_SIZE_T uiHeapLength,
IMG_UINT32 ui32Log2DataPageSize,
IMG_HANDLE * phDevmemHeapPtr);
IMG_HANDLE hDevmemCtx,
IMG_DEV_VIRTADDR sAddress);
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge,
- IMG_HANDLE hDevmemCtx,
- IMG_DEV_VIRTADDR sAddress,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate);
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
IMG_UINT64 ui64FBSCEntries);
IMG_UINT32 * pui32Log2ImportAlignmentOut);
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
- IMG_HANDLE hDevm,
- IMG_UINT32 ui32PID, IMG_BOOL bRegister);
-
-IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge,
- IMG_UINT32 * pui32PhysHeapCount);
+ IMG_HANDLE hDevmemCtx,
+ IMG_BOOL bRegister);
IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge,
IMG_UINT32 ui32PhysHeapCount,
IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge,
PVRSRV_PHYS_HEAP * peHeap);
-IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats);
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
IMG_DEV_VIRTADDR * psFaultAddress);
-IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge,
- IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid);
-
-IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP * peaPhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD *
- psapPhysHeapMemStats);
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32ui32StatType,
+ IMG_PID ui32pid);
-IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS_PKD *
- psapPhysHeapMemStats);
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32VirtPageOffset);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_UINT32 ui32VirtPageOffset,
+ IMG_UINT32 ui32PageCount);
#endif /* CLIENT_MM_BRIDGE_H */
IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 * pui32MappingTable,
eError =
PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
return eError;
}
-IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
- IMG_UINT32 ui32NumPhysChunks,
- IMG_UINT32 ui32NumVirtChunks,
- IMG_UINT32 * pui32MappingTable,
- IMG_UINT32 ui32Log2PageSize,
- PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_UINT32 ui32AnnotationLength,
- const IMG_CHAR * puiAnnotation,
- IMG_PID ui32PID,
- IMG_HANDLE * phPMRPtr,
- IMG_UINT32 ui32PDumpFlags,
- PVRSRV_MEMALLOCFLAGS_T * puiOutFlags)
-{
- PVRSRV_ERROR eError;
- PMR *psPMRPtrInt = NULL;
-
- eError =
- PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
- uiSize,
- uiChunkSize,
- ui32NumPhysChunks,
- ui32NumVirtChunks,
- pui32MappingTable,
- ui32Log2PageSize,
- uiFlags,
- ui32AnnotationLength,
- puiAnnotation,
- ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags);
-
- *phPMRPtr = psPMRPtrInt;
- return eError;
-}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
-{
- PVRSRV_ERROR eError;
- PMR *psPMRInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- psPMRInt = (PMR *) hPMR;
-
- eError = DevmemIntPin(psPMRInt);
-
- return eError;
-}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
-{
- PVRSRV_ERROR eError;
- PMR *psPMRInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- psPMRInt = (PMR *) hPMR;
-
- eError = DevmemIntUnpin(psPMRInt);
-
- return eError;
-}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
- IMG_HANDLE hMapping, IMG_HANDLE hPMR)
-{
- PVRSRV_ERROR eError;
- DEVMEMINT_MAPPING *psMappingInt;
- PMR *psPMRInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
- psPMRInt = (PMR *) hPMR;
-
- eError = DevmemIntPinValidate(psMappingInt, psPMRInt);
-
- return eError;
-}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
- IMG_HANDLE hMapping, IMG_HANDLE hPMR)
-{
- PVRSRV_ERROR eError;
- DEVMEMINT_MAPPING *psMappingInt;
- PMR *psPMRInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
- psPMRInt = (PMR *) hPMR;
-
- eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
-
- return eError;
-}
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
IMG_BOOL bbKernelMemoryCtx,
IMG_HANDLE * phDevMemServerContext,
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
IMG_DEV_VIRTADDR sHeapBaseAddr,
- IMG_DEVMEM_SIZE_T uiHeapLength,
IMG_UINT32 ui32Log2DataPageSize,
IMG_HANDLE * phDevmemHeapPtr)
{
eError =
DevmemIntHeapCreate(psDevmemCtxInt,
- sHeapBaseAddr,
- uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+ ui32HeapConfigIndex,
+ ui32HeapIndex,
+ sHeapBaseAddr, ui32Log2DataPageSize, &psDevmemHeapPtrInt);
*phDevmemHeapPtr = psDevmemHeapPtrInt;
return eError;
return eError;
}
-IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge,
- IMG_HANDLE hDevmemCtx,
- IMG_DEV_VIRTADDR sAddress,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate)
-{
-#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED)
- PVRSRV_ERROR eError;
- DEVMEMINT_CTX *psDevmemCtxInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
-
- psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
-
- eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate);
-
- return eError;
-#else
- PVR_UNREFERENCED_PARAMETER(hBridge);
- PVR_UNREFERENCED_PARAMETER(hDevmemCtx);
- PVR_UNREFERENCED_PARAMETER(sAddress);
- PVR_UNREFERENCED_PARAMETER(uiSize);
- PVR_UNREFERENCED_PARAMETER(bInvalidate);
-
- return PVRSRV_ERROR_NOT_IMPLEMENTED;
-#endif
-}
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
IMG_UINT64 ui64FBSCEntries)
}
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
- IMG_HANDLE hDevm,
- IMG_UINT32 ui32PID, IMG_BOOL bRegister)
+ IMG_HANDLE hDevmemCtx,
+ IMG_BOOL bRegister)
{
PVRSRV_ERROR eError;
- DEVMEMINT_CTX *psDevmInt;
+ DEVMEMINT_CTX *psDevmemCtxInt;
PVR_UNREFERENCED_PARAMETER(hBridge);
- psDevmInt = (DEVMEMINT_CTX *) hDevm;
-
- eError = DevmemIntRegisterPFNotifyKM(psDevmInt, ui32PID, bRegister);
-
- return eError;
-}
-
-IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge,
- IMG_UINT32 * pui32PhysHeapCount)
-{
- PVRSRV_ERROR eError;
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
- eError =
- PVRSRVGetMaxPhysHeapCountKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
- pui32PhysHeapCount);
+ eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, bRegister);
return eError;
}
return eError;
}
-IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats)
-{
- PVRSRV_ERROR eError;
-
- eError =
- PVRSRVGetHeapPhysMemUsageKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
- ui32PhysHeapCount, pasapPhysHeapMemStats);
-
- return eError;
-}
-
IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
IMG_HANDLE hDevmemCtx,
IMG_DEV_VIRTADDR * psFaultAddress)
return eError;
}
-IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge,
- IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid)
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32ui32StatType,
+ IMG_PID ui32pid)
{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
PVRSRV_ERROR eError;
- PVR_UNREFERENCED_PARAMETER(hBridge);
- eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid);
+ eError =
+ PVRSRVStatsUpdateOOMStat(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32ui32StatType, ui32pid);
return eError;
#else
- PVR_UNREFERENCED_PARAMETER(hBridge);
PVR_UNREFERENCED_PARAMETER(ui32ui32StatType);
PVR_UNREFERENCED_PARAMETER(ui32pid);
#endif
}
-IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP * peaPhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD *
- psapPhysHeapMemStats)
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation)
{
PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt;
+ DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
eError =
- PVRSRVPhysHeapGetMemInfoPkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
- ui32PhysHeapCount, peaPhysHeapID, psapPhysHeapMemStats);
+ DevmemXIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt);
+ *phReservation = psReservationInt;
return eError;
}
-IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS_PKD *
- psapPhysHeapMemStats)
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation)
{
PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+
+ eError = DevmemXIntUnreserveRange(psReservationInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32VirtPageOffset)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
eError =
- PVRSRVGetHeapPhysMemUsagePkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
- ui32PhysHeapCount, psapPhysHeapMemStats);
+ DevmemXIntMapPages(psReservationInt,
+ psPMRInt,
+ ui32PageCount, ui32PhysPageOffset, uiFlags, ui32VirtPageOffset);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_UINT32 ui32VirtPageOffset,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+
+ eError = DevmemXIntUnmapPages(psReservationInt, ui32VirtPageOffset, ui32PageCount);
return eError;
}
#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7
#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8
#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9
-#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10
-#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11
-#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12
-#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13
-#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14
-#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15
-#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16
-#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17
-#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18
-#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19
-#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20
-#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21
-#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22
-#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23
-#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24
-#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25
-#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26
-#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27
-#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28
-#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29
-#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30
-#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31
-#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32
-#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33
-#define PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+34
-#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+35
-#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+36
-#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE PVRSRV_BRIDGE_MM_CMD_FIRST+37
-#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+38
-#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+39
-#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD PVRSRV_BRIDGE_MM_CMD_FIRST+40
-#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD PVRSRV_BRIDGE_MM_CMD_FIRST+41
-#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+41)
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+33
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+34
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+35
+#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+35)
/*******************************************
PMRExportPMR
/* Bridge in structure for PhysmemNewRamBackedPMR */
typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
{
- IMG_DEVMEM_SIZE_T uiChunkSize;
IMG_DEVMEM_SIZE_T uiSize;
IMG_UINT32 *pui32MappingTable;
const IMG_CHAR *puiAnnotation;
PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
-/*******************************************
- PhysmemNewRamBackedLockedPMR
- *******************************************/
-
-/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
-typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
-{
- IMG_DEVMEM_SIZE_T uiChunkSize;
- IMG_DEVMEM_SIZE_T uiSize;
- IMG_UINT32 *pui32MappingTable;
- const IMG_CHAR *puiAnnotation;
- IMG_UINT32 ui32AnnotationLength;
- IMG_UINT32 ui32Log2PageSize;
- IMG_UINT32 ui32NumPhysChunks;
- IMG_UINT32 ui32NumVirtChunks;
- IMG_UINT32 ui32PDumpFlags;
- IMG_PID ui32PID;
- PVRSRV_MEMALLOCFLAGS_T uiFlags;
-} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
-
-/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
-typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
-{
- IMG_HANDLE hPMRPtr;
- PVRSRV_ERROR eError;
- PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
-} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
-
-/*******************************************
- DevmemIntPin
- *******************************************/
-
-/* Bridge in structure for DevmemIntPin */
-typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
-{
- IMG_HANDLE hPMR;
-} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
-
-/* Bridge out structure for DevmemIntPin */
-typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
-
-/*******************************************
- DevmemIntUnpin
- *******************************************/
-
-/* Bridge in structure for DevmemIntUnpin */
-typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
-{
- IMG_HANDLE hPMR;
-} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
-
-/* Bridge out structure for DevmemIntUnpin */
-typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
-
-/*******************************************
- DevmemIntPinValidate
- *******************************************/
-
-/* Bridge in structure for DevmemIntPinValidate */
-typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
-{
- IMG_HANDLE hMapping;
- IMG_HANDLE hPMR;
-} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
-
-/* Bridge out structure for DevmemIntPinValidate */
-typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
-
-/*******************************************
- DevmemIntUnpinInvalidate
- *******************************************/
-
-/* Bridge in structure for DevmemIntUnpinInvalidate */
-typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
-{
- IMG_HANDLE hMapping;
- IMG_HANDLE hPMR;
-} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
-
-/* Bridge out structure for DevmemIntUnpinInvalidate */
-typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
-
/*******************************************
DevmemIntCtxCreate
*******************************************/
typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
{
IMG_DEV_VIRTADDR sHeapBaseAddr;
- IMG_DEVMEM_SIZE_T uiHeapLength;
IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapIndex;
IMG_UINT32 ui32Log2DataPageSize;
} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
-/*******************************************
- DevmemFlushDevSLCRange
- *******************************************/
-
-/* Bridge in structure for DevmemFlushDevSLCRange */
-typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG
-{
- IMG_DEV_VIRTADDR sAddress;
- IMG_DEVMEM_SIZE_T uiSize;
- IMG_HANDLE hDevmemCtx;
- IMG_BOOL bInvalidate;
-} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE;
-
-/* Bridge out structure for DevmemFlushDevSLCRange */
-typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG
-{
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE;
-
/*******************************************
DevmemInvalidateFBSCTable
*******************************************/
/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
{
- IMG_HANDLE hDevm;
+ IMG_HANDLE hDevmemCtx;
IMG_BOOL bRegister;
- IMG_UINT32 ui32PID;
} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
-/*******************************************
- GetMaxPhysHeapCount
- *******************************************/
-
-/* Bridge in structure for GetMaxPhysHeapCount */
-typedef struct PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT_TAG
-{
- IMG_UINT32 ui32EmptyStructPlaceholder;
-} __packed PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT;
-
-/* Bridge out structure for GetMaxPhysHeapCount */
-typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG
-{
- PVRSRV_ERROR eError;
- IMG_UINT32 ui32PhysHeapCount;
-} __packed PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT;
-
/*******************************************
PhysHeapGetMemInfo
*******************************************/
PVRSRV_PHYS_HEAP eHeap;
} __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP;
-/*******************************************
- GetHeapPhysMemUsage
- *******************************************/
-
-/* Bridge in structure for GetHeapPhysMemUsage */
-typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE_TAG
-{
- PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
- IMG_UINT32 ui32PhysHeapCount;
-} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE;
-
-/* Bridge out structure for GetHeapPhysMemUsage */
-typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE_TAG
-{
- PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
- PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE;
-
/*******************************************
DevmemGetFaultAddress
*******************************************/
} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS;
/*******************************************
- PVRSRVUpdateOOMStats
+ PVRSRVStatsUpdateOOMStat
*******************************************/
-/* Bridge in structure for PVRSRVUpdateOOMStats */
-typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG
+/* Bridge in structure for PVRSRVStatsUpdateOOMStat */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT_TAG
{
IMG_PID ui32pid;
IMG_UINT32 ui32ui32StatType;
-} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT;
-/* Bridge out structure for PVRSRVUpdateOOMStats */
-typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG
+/* Bridge out structure for PVRSRVStatsUpdateOOMStat */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT_TAG
{
PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT;
/*******************************************
- PhysHeapGetMemInfoPkd
+ DevmemXIntReserveRange
*******************************************/
-/* Bridge in structure for PhysHeapGetMemInfoPkd */
-typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD_TAG
+/* Bridge in structure for DevmemXIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE_TAG
{
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
- PVRSRV_PHYS_HEAP *peaPhysHeapID;
- IMG_UINT32 ui32PhysHeapCount;
-} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD;
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_HANDLE hDevmemServerHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE;
-/* Bridge out structure for PhysHeapGetMemInfoPkd */
-typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD_TAG
+/* Bridge out structure for DevmemXIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE_TAG
{
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
+ IMG_HANDLE hReservation;
PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE;
/*******************************************
- GetHeapPhysMemUsagePkd
+ DevmemXIntUnreserveRange
*******************************************/
-/* Bridge in structure for GetHeapPhysMemUsagePkd */
-typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD_TAG
+/* Bridge in structure for DevmemXIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE_TAG
{
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
- IMG_UINT32 ui32PhysHeapCount;
-} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD;
+ IMG_HANDLE hReservation;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemXIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE;
+
+/*******************************************
+ DevmemXIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32PhysPageOffset;
+ IMG_UINT32 ui32VirtPageOffset;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES;
+
+/* Bridge out structure for DevmemXIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES;
+
+/*******************************************
+ DevmemXIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32VirtPageOffset;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES;
-/* Bridge out structure for GetHeapPhysMemUsagePkd */
-typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD_TAG
+/* Bridge out structure for DevmemXIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES_TAG
{
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES;
#endif /* COMMON_MM_BRIDGE_H */
return eError;
}
-static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
- "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
"DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0;
- if (unlikely(psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
+ if (unlikely
+ (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
{
psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
goto PhysmemNewRamBackedPMR_exit;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
psPhysmemNewRamBackedPMROUT->eError =
PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection),
psPhysmemNewRamBackedPMRIN->uiSize,
- psPhysmemNewRamBackedPMRIN->uiChunkSize,
psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
ui32MappingTableInt,
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
- if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
- OSFreeMemNoStats(pArrayArgsBuffer);
-
- return 0;
-}
-
-static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData)
-{
- PVRSRV_ERROR eError;
- eError = PMRUnrefUnlockPMR((PMR *) pvData);
- return eError;
-}
-
-static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
- "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
-static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
- "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
-
-static IMG_INT
-PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8,
- IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN =
- (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *)
- IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT =
- (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *)
- IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0);
-
- IMG_UINT32 *ui32MappingTableInt = NULL;
- IMG_CHAR *uiAnnotationInt = NULL;
- PMR *psPMRPtrInt = NULL;
-
- IMG_UINT32 ui32NextOffset = 0;
- IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
- IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
-
- IMG_UINT32 ui32BufferSize = 0;
- IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks *
- sizeof(IMG_UINT32)) +
- ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
- sizeof(IMG_CHAR)) + 0;
-
- if (unlikely
- (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_PAGE_COUNT))
- {
- psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
-
- if (unlikely
- (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN))
- {
- psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
-
- if (ui64BufferSize > IMG_UINT32_MAX)
- {
- psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
-
- ui32BufferSize = (IMG_UINT32) ui64BufferSize;
-
- if (ui32BufferSize != 0)
- {
-#if !defined(INTEGRITY_OS)
- /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
- IMG_UINT32 ui32InBufferOffset =
- PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long));
- IMG_UINT32 ui32InBufferExcessSize =
- ui32InBufferOffset >=
- PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
-
- bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
- if (bHaveEnoughSpace)
- {
- IMG_BYTE *pInputBuffer =
- (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN;
-
- pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
- }
- else
-#endif
- {
- pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
-
- if (!pArrayArgsBuffer)
- {
- psPhysmemNewRamBackedLockedPMROUT->eError =
- PVRSRV_ERROR_OUT_OF_MEMORY;
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
- }
- }
-
- if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
- {
- ui32MappingTableInt =
- (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32);
- }
-
- /* Copy the data over */
- if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0)
- {
- if (OSCopyFromUser
- (NULL, ui32MappingTableInt,
- (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable,
- psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) !=
- PVRSRV_OK)
- {
- psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
-
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
- }
- if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
- {
- uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
- }
-
- /* Copy the data over */
- if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
- {
- if (OSCopyFromUser
- (NULL, uiAnnotationInt,
- (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation,
- psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) !=
- PVRSRV_OK)
- {
- psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
-
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
- ((IMG_CHAR *)
- uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength *
- sizeof(IMG_CHAR)) - 1] = '\0';
- }
-
- psPhysmemNewRamBackedLockedPMROUT->eError =
- PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection),
- psPhysmemNewRamBackedLockedPMRIN->uiSize,
- psPhysmemNewRamBackedLockedPMRIN->uiChunkSize,
- psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks,
- psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks,
- ui32MappingTableInt,
- psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize,
- psPhysmemNewRamBackedLockedPMRIN->uiFlags,
- psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength,
- uiAnnotationInt,
- psPhysmemNewRamBackedLockedPMRIN->ui32PID,
- &psPMRPtrInt,
- psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags,
- &psPhysmemNewRamBackedLockedPMROUT->uiOutFlags);
- /* Exit early if bridged call fails */
- if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
- {
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
-
- /* Lock over handle creation. */
- LockHandle(psConnection->psHandleBase);
-
- psPhysmemNewRamBackedLockedPMROUT->eError =
- PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
- &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr,
- (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- (PFN_HANDLE_RELEASE) &
- _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease);
- if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto PhysmemNewRamBackedLockedPMR_exit;
- }
-
- /* Release now we have created handles. */
- UnlockHandle(psConnection->psHandleBase);
-
-PhysmemNewRamBackedLockedPMR_exit:
-
- if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
- {
- if (psPMRPtrInt)
- {
- LockHandle(KERNEL_HANDLE_BASE);
- PMRUnrefUnlockPMR(psPMRPtrInt);
- UnlockHandle(KERNEL_HANDLE_BASE);
- }
- }
-
- /* Allocated space should be equal to the last updated offset */
-#ifdef PVRSRV_NEED_PVR_ASSERT
- if (psPhysmemNewRamBackedLockedPMROUT->eError == PVRSRV_OK)
- PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-#endif /* PVRSRV_NEED_PVR_ASSERT */
-
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
}
-static IMG_INT
-PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psDevmemIntPinIN_UI8,
- IMG_UINT8 * psDevmemIntPinOUT_UI8, CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN =
- (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT =
- (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0);
-
- IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
- PMR *psPMRInt = NULL;
-
- /* Lock over handle lookup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Look up the address from the handle */
- psDevmemIntPinOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psPMRInt,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
- if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntPin_exit;
- }
- /* Release now we have looked up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt);
-
-DevmemIntPin_exit:
-
- /* Lock over handle lookup cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Unreference the previously looked up handle */
- if (psPMRInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
- }
- /* Release now we have cleaned up look up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- return 0;
-}
-
-static IMG_INT
-PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psDevmemIntUnpinIN_UI8,
- IMG_UINT8 * psDevmemIntUnpinOUT_UI8, CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN =
- (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT =
- (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0);
-
- IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
- PMR *psPMRInt = NULL;
-
- /* Lock over handle lookup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Look up the address from the handle */
- psDevmemIntUnpinOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psPMRInt,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
- if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntUnpin_exit;
- }
- /* Release now we have looked up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt);
-
-DevmemIntUnpin_exit:
-
- /* Lock over handle lookup cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Unreference the previously looked up handle */
- if (psPMRInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
- }
- /* Release now we have cleaned up look up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- return 0;
-}
-
-static IMG_INT
-PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psDevmemIntPinValidateIN_UI8,
- IMG_UINT8 * psDevmemIntPinValidateOUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN =
- (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8,
- 0);
- PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT =
- (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *)
- IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0);
-
- IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
- DEVMEMINT_MAPPING *psMappingInt = NULL;
- IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
- PMR *psPMRInt = NULL;
-
- /* Lock over handle lookup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Look up the address from the handle */
- psDevmemIntPinValidateOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psMappingInt,
- hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE);
- if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntPinValidate_exit;
- }
-
- /* Look up the address from the handle */
- psDevmemIntPinValidateOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psPMRInt,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
- if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntPinValidate_exit;
- }
- /* Release now we have looked up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- psDevmemIntPinValidateOUT->eError = DevmemIntPinValidate(psMappingInt, psPMRInt);
-
-DevmemIntPinValidate_exit:
-
- /* Lock over handle lookup cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Unreference the previously looked up handle */
- if (psMappingInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
- }
-
- /* Unreference the previously looked up handle */
- if (psPMRInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
- }
- /* Release now we have cleaned up look up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- return 0;
-}
-
-static IMG_INT
-PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psDevmemIntUnpinInvalidateIN_UI8,
- IMG_UINT8 * psDevmemIntUnpinInvalidateOUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN =
- (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *)
- IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT =
- (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *)
- IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0);
-
- IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
- DEVMEMINT_MAPPING *psMappingInt = NULL;
- IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
- PMR *psPMRInt = NULL;
-
- /* Lock over handle lookup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Look up the address from the handle */
- psDevmemIntUnpinInvalidateOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psMappingInt,
- hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE);
- if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntUnpinInvalidate_exit;
- }
-
- /* Look up the address from the handle */
- psDevmemIntUnpinInvalidateOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psPMRInt,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
- if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemIntUnpinInvalidate_exit;
- }
- /* Release now we have looked up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- psDevmemIntUnpinInvalidateOUT->eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt);
-
-DevmemIntUnpinInvalidate_exit:
-
- /* Lock over handle lookup cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Unreference the previously looked up handle */
- if (psMappingInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
- }
-
- /* Unreference the previously looked up handle */
- if (psPMRInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
- }
- /* Release now we have cleaned up look up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- return 0;
-}
-
static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData)
{
PVRSRV_ERROR eError;
psDevmemIntHeapCreateOUT->eError =
DevmemIntHeapCreate(psDevmemCtxInt,
+ psDevmemIntHeapCreateIN->ui32HeapConfigIndex,
+ psDevmemIntHeapCreateIN->ui32HeapIndex,
psDevmemIntHeapCreateIN->sHeapBaseAddr,
- psDevmemIntHeapCreateIN->uiHeapLength,
psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt);
/* Exit early if bridged call fails */
if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
return 0;
}
-static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
- "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
-static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX,
- "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
static IMG_INT
PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0;
- if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_PAGE_COUNT))
+ if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
{
psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
goto ChangeSparseMem_exit;
}
- if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_PAGE_COUNT))
+ if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
{
psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
goto ChangeSparseMem_exit;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
return 0;
}
-#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED)
-
-static IMG_INT
-PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8,
- IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN =
- (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *)
- IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT =
- (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *)
- IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0);
-
- IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx;
- DEVMEMINT_CTX *psDevmemCtxInt = NULL;
-
- /* Lock over handle lookup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Look up the address from the handle */
- psDevmemFlushDevSLCRangeOUT->eError =
- PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psDevmemCtxInt,
- hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
- if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto DevmemFlushDevSLCRange_exit;
- }
- /* Release now we have looked up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- psDevmemFlushDevSLCRangeOUT->eError =
- DevmemIntFlushDevSLCRange(psDevmemCtxInt,
- psDevmemFlushDevSLCRangeIN->sAddress,
- psDevmemFlushDevSLCRangeIN->uiSize,
- psDevmemFlushDevSLCRangeIN->bInvalidate);
-
-DevmemFlushDevSLCRange_exit:
-
- /* Lock over handle lookup cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- /* Unreference the previously looked up handle */
- if (psDevmemCtxInt)
- {
- PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
- }
- /* Release now we have cleaned up look up handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- return 0;
-}
-
-#else
-#define PVRSRVBridgeDevmemFlushDevSLCRange NULL
-#endif
-
#if defined(RGX_FEATURE_FBCDC)
static IMG_INT
return 0;
}
+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX,
+ "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
}
+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX,
+ "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
(PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *)
IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0);
- IMG_HANDLE hDevm = psDevmemIntRegisterPFNotifyKMIN->hDevm;
- DEVMEMINT_CTX *psDevmInt = NULL;
+ IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+ DEVMEMINT_CTX *psDevmemCtxInt = NULL;
/* Lock over handle lookup. */
LockHandle(psConnection->psHandleBase);
/* Look up the address from the handle */
psDevmemIntRegisterPFNotifyKMOUT->eError =
PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
- (void **)&psDevmInt,
- hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ (void **)&psDevmemCtxInt,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK))
{
UnlockHandle(psConnection->psHandleBase);
UnlockHandle(psConnection->psHandleBase);
psDevmemIntRegisterPFNotifyKMOUT->eError =
- DevmemIntRegisterPFNotifyKM(psDevmInt,
- psDevmemIntRegisterPFNotifyKMIN->ui32PID,
- psDevmemIntRegisterPFNotifyKMIN->bRegister);
+ DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, psDevmemIntRegisterPFNotifyKMIN->bRegister);
DevmemIntRegisterPFNotifyKM_exit:
LockHandle(psConnection->psHandleBase);
/* Unreference the previously looked up handle */
- if (psDevmInt)
+ if (psDevmemCtxInt)
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
- hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
}
/* Release now we have cleaned up look up handles. */
UnlockHandle(psConnection->psHandleBase);
return 0;
}
-static IMG_INT
-PVRSRVBridgeGetMaxPhysHeapCount(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psGetMaxPhysHeapCountIN_UI8,
- IMG_UINT8 * psGetMaxPhysHeapCountOUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountIN =
- (PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountIN_UI8,
- 0);
- PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountOUT =
- (PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountOUT_UI8,
- 0);
-
- PVR_UNREFERENCED_PARAMETER(psGetMaxPhysHeapCountIN);
-
- psGetMaxPhysHeapCountOUT->eError =
- PVRSRVGetMaxPhysHeapCountKM(psConnection, OSGetDevNode(psConnection),
- &psGetMaxPhysHeapCountOUT->ui32PhysHeapCount);
-
- return 0;
-}
-
static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX,
"PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX");
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
(PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *)
IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0);
- PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN);
-
- psGetDefaultPhysicalHeapOUT->eError =
- PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection),
- &psGetDefaultPhysicalHeapOUT->eHeap);
-
- return 0;
-}
-
-static IMG_INT
-PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psGetHeapPhysMemUsageIN_UI8,
- IMG_UINT8 * psGetHeapPhysMemUsageOUT_UI8,
- CONNECTION_DATA * psConnection)
-{
- PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageIN =
- (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageIN_UI8,
- 0);
- PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageOUT =
- (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageOUT_UI8,
- 0);
-
- PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL;
-
- IMG_UINT32 ui32NextOffset = 0;
- IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
- IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
-
- IMG_UINT32 ui32BufferSize = 0;
- IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psGetHeapPhysMemUsageIN->ui32PhysHeapCount *
- sizeof(PHYS_HEAP_MEM_STATS)) + 0;
-
- if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)
- {
- psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto GetHeapPhysMemUsage_exit;
- }
-
- psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats =
- psGetHeapPhysMemUsageIN->pasapPhysHeapMemStats;
-
- if (ui64BufferSize > IMG_UINT32_MAX)
- {
- psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
- goto GetHeapPhysMemUsage_exit;
- }
-
- ui32BufferSize = (IMG_UINT32) ui64BufferSize;
-
- if (ui32BufferSize != 0)
- {
-#if !defined(INTEGRITY_OS)
- /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
- IMG_UINT32 ui32InBufferOffset =
- PVR_ALIGN(sizeof(*psGetHeapPhysMemUsageIN), sizeof(unsigned long));
- IMG_UINT32 ui32InBufferExcessSize =
- ui32InBufferOffset >=
- PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
-
- bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
- if (bHaveEnoughSpace)
- {
- IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsageIN;
-
- pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
- }
- else
-#endif
- {
- pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
-
- if (!pArrayArgsBuffer)
- {
- psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto GetHeapPhysMemUsage_exit;
- }
- }
- }
-
- if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount != 0)
- {
- pasapPhysHeapMemStatsInt =
- (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS);
- }
-
- psGetHeapPhysMemUsageOUT->eError =
- PVRSRVGetHeapPhysMemUsageKM(psConnection, OSGetDevNode(psConnection),
- psGetHeapPhysMemUsageIN->ui32PhysHeapCount,
- pasapPhysHeapMemStatsInt);
- /* Exit early if bridged call fails */
- if (unlikely(psGetHeapPhysMemUsageOUT->eError != PVRSRV_OK))
- {
- goto GetHeapPhysMemUsage_exit;
- }
-
- /* If dest ptr is non-null and we have data to copy */
- if ((pasapPhysHeapMemStatsInt) &&
- ((psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0))
- {
- if (unlikely
- (OSCopyToUser
- (NULL, (void __user *)psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats,
- pasapPhysHeapMemStatsInt,
- (psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) !=
- PVRSRV_OK))
- {
- psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
-
- goto GetHeapPhysMemUsage_exit;
- }
- }
-
-GetHeapPhysMemUsage_exit:
-
- /* Allocated space should be equal to the last updated offset */
-#ifdef PVRSRV_NEED_PVR_ASSERT
- if (psGetHeapPhysMemUsageOUT->eError == PVRSRV_OK)
- PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-#endif /* PVRSRV_NEED_PVR_ASSERT */
+ PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN);
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
- if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
- OSFreeMemNoStats(pArrayArgsBuffer);
+ psGetDefaultPhysicalHeapOUT->eError =
+ PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection),
+ &psGetDefaultPhysicalHeapOUT->eHeap);
return 0;
}
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
static IMG_INT
-PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8,
- IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8,
- CONNECTION_DATA * psConnection)
+PVRSRVBridgePVRSRVStatsUpdateOOMStat(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVStatsUpdateOOMStatIN_UI8,
+ IMG_UINT8 * psPVRSRVStatsUpdateOOMStatOUT_UI8,
+ CONNECTION_DATA * psConnection)
{
- PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN =
- (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8,
- 0);
- PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT =
- (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *)
- IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0);
-
- PVR_UNREFERENCED_PARAMETER(psConnection);
-
- psPVRSRVUpdateOOMStatsOUT->eError =
- PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN->ui32ui32StatType,
- psPVRSRVUpdateOOMStatsIN->ui32pid);
+ PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *)
+ IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *)
+ IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatOUT_UI8, 0);
+
+ psPVRSRVStatsUpdateOOMStatOUT->eError =
+ PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection),
+ psPVRSRVStatsUpdateOOMStatIN->ui32ui32StatType,
+ psPVRSRVStatsUpdateOOMStatIN->ui32pid);
return 0;
}
#else
-#define PVRSRVBridgePVRSRVUpdateOOMStats NULL
+#define PVRSRVBridgePVRSRVStatsUpdateOOMStat NULL
#endif
-static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX,
- "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX");
+static PVRSRV_ERROR _DevmemXIntReserveRangepsReservationIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = DevmemXIntUnreserveRange((DEVMEMXINT_RESERVATION *) pvData);
+ return eError;
+}
static IMG_INT
-PVRSRVBridgePhysHeapGetMemInfoPkd(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psPhysHeapGetMemInfoPkdIN_UI8,
- IMG_UINT8 * psPhysHeapGetMemInfoPkdOUT_UI8,
- CONNECTION_DATA * psConnection)
+PVRSRVBridgeDevmemXIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemXIntReserveRangeIN_UI8,
+ IMG_UINT8 * psDevmemXIntReserveRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
{
- PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdIN =
- (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *)
- IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdOUT =
- (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *)
- IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdOUT_UI8, 0);
-
- PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL;
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL;
-
- IMG_UINT32 ui32NextOffset = 0;
- IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
- IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
+ PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemXIntReserveRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemXIntReserveRangeOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemServerHeap = psDevmemXIntReserveRangeIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+ DEVMEMXINT_RESERVATION *psReservationInt = NULL;
- IMG_UINT32 ui32BufferSize = 0;
- IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) +
- ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount *
- sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0;
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
- if (unlikely(psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST))
+ /* Look up the address from the handle */
+ psDevmemXIntReserveRangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
{
- psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto PhysHeapGetMemInfoPkd_exit;
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntReserveRange_exit;
}
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
- psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats =
- psPhysHeapGetMemInfoPkdIN->psapPhysHeapMemStats;
-
- if (ui64BufferSize > IMG_UINT32_MAX)
+ psDevmemXIntReserveRangeOUT->eError =
+ DevmemXIntReserveRange(psDevmemServerHeapInt,
+ psDevmemXIntReserveRangeIN->sAddress,
+ psDevmemXIntReserveRangeIN->uiLength, &psReservationInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
{
- psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
- goto PhysHeapGetMemInfoPkd_exit;
+ goto DevmemXIntReserveRange_exit;
}
- ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
- if (ui32BufferSize != 0)
+ psDevmemXIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psDevmemXIntReserveRangeOUT->
+ hReservation,
+ (void *)psReservationInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _DevmemXIntReserveRangepsReservationIntRelease);
+ if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
{
-#if !defined(INTEGRITY_OS)
- /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
- IMG_UINT32 ui32InBufferOffset =
- PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoPkdIN), sizeof(unsigned long));
- IMG_UINT32 ui32InBufferExcessSize =
- ui32InBufferOffset >=
- PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntReserveRange_exit;
+ }
- bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
- if (bHaveEnoughSpace)
- {
- IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoPkdIN;
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
- pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
- }
- else
-#endif
- {
- pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+DevmemXIntReserveRange_exit:
- if (!pArrayArgsBuffer)
- {
- psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto PhysHeapGetMemInfoPkd_exit;
- }
- }
- }
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
- if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0)
+ /* Unreference the previously looked up handle */
+ if (psDevmemServerHeapInt)
{
- eaPhysHeapIDInt =
- (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP);
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
}
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
- /* Copy the data over */
- if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0)
+ if (psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)
{
- if (OSCopyFromUser
- (NULL, eaPhysHeapIDInt,
- (const void __user *)psPhysHeapGetMemInfoPkdIN->peaPhysHeapID,
- psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) !=
- PVRSRV_OK)
+ if (psReservationInt)
{
- psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
-
- goto PhysHeapGetMemInfoPkd_exit;
+ DevmemXIntUnreserveRange(psReservationInt);
}
}
- if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0)
- {
- psapPhysHeapMemStatsInt =
- (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD);
- }
- psPhysHeapGetMemInfoPkdOUT->eError =
- PVRSRVPhysHeapGetMemInfoPkdKM(psConnection, OSGetDevNode(psConnection),
- psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount,
- eaPhysHeapIDInt, psapPhysHeapMemStatsInt);
- /* Exit early if bridged call fails */
- if (unlikely(psPhysHeapGetMemInfoPkdOUT->eError != PVRSRV_OK))
- {
- goto PhysHeapGetMemInfoPkd_exit;
- }
+ return 0;
+}
- /* If dest ptr is non-null and we have data to copy */
- if ((psapPhysHeapMemStatsInt) &&
- ((psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0))
- {
- if (unlikely
- (OSCopyToUser
- (NULL, (void __user *)psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats,
- psapPhysHeapMemStatsInt,
- (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount *
- sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK))
- {
- psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+static IMG_INT
+PVRSRVBridgeDevmemXIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemXIntUnreserveRangeIN_UI8,
+ IMG_UINT8 * psDevmemXIntUnreserveRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeOUT_UI8, 0);
- goto PhysHeapGetMemInfoPkd_exit;
- }
- }
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
-PhysHeapGetMemInfoPkd_exit:
+ psDevmemXIntUnreserveRangeOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemXIntUnreserveRangeIN->
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
+ if (unlikely
+ ((psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_OK)
+ && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psDevmemXIntUnreserveRangeOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntUnreserveRange_exit;
+ }
- /* Allocated space should be equal to the last updated offset */
-#ifdef PVRSRV_NEED_PVR_ASSERT
- if (psPhysHeapGetMemInfoPkdOUT->eError == PVRSRV_OK)
- PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-#endif /* PVRSRV_NEED_PVR_ASSERT */
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
- if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
- OSFreeMemNoStats(pArrayArgsBuffer);
+DevmemXIntUnreserveRange_exit:
return 0;
}
static IMG_INT
-PVRSRVBridgeGetHeapPhysMemUsagePkd(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psGetHeapPhysMemUsagePkdIN_UI8,
- IMG_UINT8 * psGetHeapPhysMemUsagePkdOUT_UI8,
- CONNECTION_DATA * psConnection)
+PVRSRVBridgeDevmemXIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemXIntMapPagesIN_UI8,
+ IMG_UINT8 * psDevmemXIntMapPagesOUT_UI8,
+ CONNECTION_DATA * psConnection)
{
- PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *psGetHeapPhysMemUsagePkdIN =
- (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *)
- IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD *psGetHeapPhysMemUsagePkdOUT =
- (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD *)
- IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdOUT_UI8, 0);
-
- PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL;
+ PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesOUT_UI8,
+ 0);
- IMG_UINT32 ui32NextOffset = 0;
- IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
- IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
+ IMG_HANDLE hReservation = psDevmemXIntMapPagesIN->hReservation;
+ DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemXIntMapPagesIN->hPMR;
+ PMR *psPMRInt = NULL;
- IMG_UINT32 ui32BufferSize = 0;
- IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount *
- sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0;
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
- if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)
+ /* Look up the address from the handle */
+ psDevmemXIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK))
{
- psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
- goto GetHeapPhysMemUsagePkd_exit;
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntMapPages_exit;
}
- psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats =
- psGetHeapPhysMemUsagePkdIN->psapPhysHeapMemStats;
-
- if (ui64BufferSize > IMG_UINT32_MAX)
+ /* Look up the address from the handle */
+ psDevmemXIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK))
{
- psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
- goto GetHeapPhysMemUsagePkd_exit;
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntMapPages_exit;
}
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
- ui32BufferSize = (IMG_UINT32) ui64BufferSize;
-
- if (ui32BufferSize != 0)
- {
-#if !defined(INTEGRITY_OS)
- /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
- IMG_UINT32 ui32InBufferOffset =
- PVR_ALIGN(sizeof(*psGetHeapPhysMemUsagePkdIN), sizeof(unsigned long));
- IMG_UINT32 ui32InBufferExcessSize =
- ui32InBufferOffset >=
- PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
-
- bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
- if (bHaveEnoughSpace)
- {
- IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsagePkdIN;
+ psDevmemXIntMapPagesOUT->eError =
+ DevmemXIntMapPages(psReservationInt,
+ psPMRInt,
+ psDevmemXIntMapPagesIN->ui32PageCount,
+ psDevmemXIntMapPagesIN->ui32PhysPageOffset,
+ psDevmemXIntMapPagesIN->uiFlags,
+ psDevmemXIntMapPagesIN->ui32VirtPageOffset);
- pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
- }
- else
-#endif
- {
- pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+DevmemXIntMapPages_exit:
- if (!pArrayArgsBuffer)
- {
- psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto GetHeapPhysMemUsagePkd_exit;
- }
- }
- }
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
- if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount != 0)
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
{
- psapPhysHeapMemStatsInt =
- (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset +=
- psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD);
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
}
- psGetHeapPhysMemUsagePkdOUT->eError =
- PVRSRVGetHeapPhysMemUsagePkdKM(psConnection, OSGetDevNode(psConnection),
- psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount,
- psapPhysHeapMemStatsInt);
- /* Exit early if bridged call fails */
- if (unlikely(psGetHeapPhysMemUsagePkdOUT->eError != PVRSRV_OK))
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
{
- goto GetHeapPhysMemUsagePkd_exit;
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
}
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
- /* If dest ptr is non-null and we have data to copy */
- if ((psapPhysHeapMemStatsInt) &&
- ((psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0))
- {
- if (unlikely
- (OSCopyToUser
- (NULL, (void __user *)psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats,
- psapPhysHeapMemStatsInt,
- (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount *
- sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK))
- {
- psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return 0;
+}
- goto GetHeapPhysMemUsagePkd_exit;
- }
+static IMG_INT
+PVRSRVBridgeDevmemXIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemXIntUnmapPagesIN_UI8,
+ IMG_UINT8 * psDevmemXIntUnmapPagesOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *)
+ IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesOUT_UI8, 0);
+
+ IMG_HANDLE hReservation = psDevmemXIntUnmapPagesIN->hReservation;
+ DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemXIntUnmapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psDevmemXIntUnmapPagesOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemXIntUnmapPages_exit;
}
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
-GetHeapPhysMemUsagePkd_exit:
+ psDevmemXIntUnmapPagesOUT->eError =
+ DevmemXIntUnmapPages(psReservationInt,
+ psDevmemXIntUnmapPagesIN->ui32VirtPageOffset,
+ psDevmemXIntUnmapPagesIN->ui32PageCount);
- /* Allocated space should be equal to the last updated offset */
-#ifdef PVRSRV_NEED_PVR_ASSERT
- if (psGetHeapPhysMemUsagePkdOUT->eError == PVRSRV_OK)
- PVR_ASSERT(ui32BufferSize == ui32NextOffset);
-#endif /* PVRSRV_NEED_PVR_ASSERT */
+DevmemXIntUnmapPages_exit:
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
- if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
- OSFreeMemNoStats(pArrayArgsBuffer);
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
return 0;
}
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR,
PVRSRVBridgePhysmemNewRamBackedPMR, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR,
- PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN,
- PVRSRVBridgeDevmemIntPin, NULL);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN,
- PVRSRVBridgeDevmemIntUnpin, NULL);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE,
- PVRSRVBridgeDevmemIntPinValidate, NULL);
-
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE,
- PVRSRVBridgeDevmemIntUnpinInvalidate, NULL);
-
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
PVRSRVBridgeDevmemIntCtxCreate, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID,
PVRSRVBridgeDevmemIsVDevAddrValid, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE,
- PVRSRVBridgeDevmemFlushDevSLCRange, NULL);
-
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE,
PVRSRVBridgeDevmemInvalidateFBSCTable, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM,
PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT,
- PVRSRVBridgeGetMaxPhysHeapCount, NULL);
-
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO,
PVRSRVBridgePhysHeapGetMemInfo, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP,
PVRSRVBridgeGetDefaultPhysicalHeap, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE,
- PVRSRVBridgeGetHeapPhysMemUsage, NULL);
-
SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS,
PVRSRVBridgeDevmemGetFaultAddress, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS,
- PVRSRVBridgePVRSRVUpdateOOMStats, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT,
+ PVRSRVBridgePVRSRVStatsUpdateOOMStat, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD,
- PVRSRVBridgePhysHeapGetMemInfoPkd, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE,
+ PVRSRVBridgeDevmemXIntReserveRange, NULL);
- SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD,
- PVRSRVBridgeGetHeapPhysMemUsagePkd, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE,
+ PVRSRVBridgeDevmemXIntUnreserveRange, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES,
+ PVRSRVBridgeDevmemXIntMapPages, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES,
+ PVRSRVBridgeDevmemXIntUnmapPages, NULL);
return PVRSRV_OK;
}
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR);
-
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN);
-
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN);
-
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE);
-
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE);
-
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE);
-
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT);
-
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE);
-
UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES);
- UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES);
}
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32Start,
IMG_UINT32 ui32End,
IMG_UINT32 ui32Interval,
- IMG_UINT32 ui32MaxParamFileSize);
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout);
IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
IMG_BOOL * pbpbIsLastCaptureFrame);
IMG_UINT32 ui32Start,
IMG_UINT32 ui32End,
IMG_UINT32 ui32Interval,
- IMG_UINT32 ui32MaxParamFileSize)
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout)
{
PVRSRV_ERROR eError;
eError =
PDumpSetDefaultCaptureParamsKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
ui32Mode,
- ui32Start, ui32End, ui32Interval, ui32MaxParamFileSize);
+ ui32Start,
+ ui32End,
+ ui32Interval, ui32MaxParamFileSize, ui32AutoTermTimeout);
return eError;
}
/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
{
+ IMG_UINT32 ui32AutoTermTimeout;
IMG_UINT32 ui32End;
IMG_UINT32 ui32Interval;
IMG_UINT32 ui32MaxParamFileSize;
/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
{
- IMG_BOOL bpbIsLastCaptureFrame;
PVRSRV_ERROR eError;
+ IMG_BOOL bpbIsLastCaptureFrame;
} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
/*******************************************
psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
psPVRSRVPDumpSetDefaultCaptureParamsIN->
- ui32MaxParamFileSize);
+ ui32MaxParamFileSize,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->
+ ui32AutoTermTimeout);
return 0;
}
{
PVRSRV_ERROR eError;
DEVMEMINT_CTX *psDevmemServerContextInt;
- PVR_UNREFERENCED_PARAMETER(hBridge);
psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
eError =
- DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt,
+ DevmemIntPDumpSaveToFileVirtual(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemServerContextInt,
sAddress,
uiSize,
ui32ArraySize,
IMG_DEVMEM_OFFSET_T uiOffset;
IMG_DEVMEM_SIZE_T uiSize;
IMG_HANDLE hPMR;
- IMG_BOOL bbZero;
IMG_UINT32 ui32PDumpFlags;
+ IMG_BOOL bbZero;
} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
/* Bridge out structure for PMRPDumpLoadMem */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
}
+static_assert(PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH <= IMG_UINT32_MAX,
+ "PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH <= IMG_UINT32_MAX,
+ "PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psPMRPDumpSymbolicAddrIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
UnlockHandle(psConnection->psHandleBase);
psDevmemIntPDumpSaveToFileVirtualOUT->eError =
- DevmemIntPDumpSaveToFileVirtual(psDevmemServerContextInt,
+ DevmemIntPDumpSaveToFileVirtual(psConnection, OSGetDevNode(psConnection),
+ psDevmemServerContextInt,
psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
"PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER <= IMG_UINT32_MAX,
+ "PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER must not be larger than IMG_UINT32_MAX");
static IMG_INT
PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
/* Bridge in structure for RGXSetBreakpoint */
typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
{
+ IMG_UINT64 ui64TempSpillingAddr;
IMG_HANDLE hPrivData;
IMG_UINT32 eFWDataMaster;
IMG_UINT32 ui32BreakpointAddr;
PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection),
hPrivDataInt,
psRGXSetBreakpointIN->eFWDataMaster,
+ psRGXSetBreakpointIN->ui64TempSpillingAddr,
psRGXSetBreakpointIN->ui32BreakpointAddr,
psRGXSetBreakpointIN->ui32HandlerAddr,
psRGXSetBreakpointIN->ui32DM);
#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
-#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8)
/*******************************************
RGXCreateComputeContext
IMG_HANDLE hPrivData;
IMG_BYTE *pui8FrameworkCmd;
IMG_BYTE *pui8StaticComputeContextState;
+ IMG_INT32 i32Priority;
IMG_UINT32 ui32ContextFlags;
- IMG_UINT32 ui32FrameworkCmdize;
+ IMG_UINT32 ui32FrameworkCmdSize;
IMG_UINT32 ui32MaxDeadlineMS;
IMG_UINT32 ui32PackedCCBSizeU88;
- IMG_UINT32 ui32Priority;
IMG_UINT32 ui32StaticComputeContextStateSize;
} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
{
IMG_HANDLE hComputeContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
/* Bridge out structure for RGXSetComputeContextPriority */
IMG_UINT32 ui32Error;
} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
+/*******************************************
+ RGXKickTimestampQuery
+ *******************************************/
+
+/* Bridge in structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY_TAG
+{
+ IMG_HANDLE hComputeContext;
+ IMG_BYTE *pui8DMCmd;
+ PVRSRV_FENCE hCheckFenceFd;
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT32 ui32ExtJobRef;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY;
+
+/* Bridge out structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY;
+
#endif /* COMMON_RGXCMP_BRIDGE_H */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
((IMG_UINT64) psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
sizeof(IMG_BYTE)) + 0;
- if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE))
+ if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
{
psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
goto RGXCreateComputeContext_exit;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
}
}
- if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize != 0)
{
ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
ui32NextOffset +=
- psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
}
/* Copy the data over */
- if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
{
if (OSCopyFromUser
(NULL, ui8FrameworkCmdInt,
(const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd,
- psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) !=
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
PVRSRV_OK)
{
psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
psRGXCreateComputeContextOUT->eError =
PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection),
- psRGXCreateComputeContextIN->ui32Priority,
- psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+ psRGXCreateComputeContextIN->i32Priority,
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize,
ui8FrameworkCmdInt,
hPrivDataInt,
psRGXCreateComputeContextIN->
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
psRGXSetComputeContextPriorityOUT->eError =
PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection),
psComputeContextInt,
- psRGXSetComputeContextPriorityIN->ui32Priority);
+ psRGXSetComputeContextPriorityIN->i32Priority);
RGXSetComputeContextPriority_exit:
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psClientUpdateUFOSyncPrimBlockInt[i])
+ if (psClientUpdateUFOSyncPrimBlockInt
+ && psClientUpdateUFOSyncPrimBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hClientUpdateUFOSyncPrimBlockInt2[i],
{
/* Unreference the previously looked up handle */
- if (psSyncPMRsInt[i])
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hSyncPMRsInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
/* Release now we have looked up handles. */
UnlockHandle(psConnection->psHandleBase);
-	psRGXSetComputeContextPropertyOUT->eError =
+	psRGXSetComputeContextPropertyOUT->eError =
	    PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt,
						 psRGXSetComputeContextPropertyIN->ui32Property,
						 psRGXSetComputeContextPropertyIN->ui64Input,
						 &psRGXSetComputeContextPropertyOUT->ui64Output);
-
+
RGXSetComputeContextProperty_exit:
/* Lock over handle lookup cleanup. */
return 0;
}
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXKickTimestampQuery(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXKickTimestampQueryIN_UI8,
+ IMG_UINT8 * psRGXKickTimestampQueryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryIN =
+ (PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *)
+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryOUT =
+ (PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *)
+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryOUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXKickTimestampQueryIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+ IMG_BYTE *ui8DMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0;
+
+ if (unlikely(psRGXKickTimestampQueryIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTimestampQuery_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXKickTimestampQuery_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXKickTimestampQueryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTimestampQueryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+ }
+
+ if (psRGXKickTimestampQueryIN->ui32CmdSize != 0)
+ {
+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickTimestampQueryIN->pui8DMCmd,
+ psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXKickTimestampQueryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXKickTimestampQueryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTimestampQuery_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXKickTimestampQueryOUT->eError =
+ PVRSRVRGXKickTimestampQueryKM(psComputeContextInt,
+ psRGXKickTimestampQueryIN->hCheckFenceFd,
+ psRGXKickTimestampQueryIN->ui32CmdSize,
+ ui8DMCmdInt, psRGXKickTimestampQueryIN->ui32ExtJobRef);
+
+RGXKickTimestampQuery_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXKickTimestampQueryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
/* ***************************************************************************
* Server bridge dispatch related glue
*/
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR,
PVRSRVBridgeRGXGetLastDeviceError, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY,
+ PVRSRVBridgeRGXKickTimestampQuery, NULL);
+
return PVRSRV_OK;
}
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY);
+
}
#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0
#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1
#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2
-#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
-#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
-#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
-#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
-#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7
-#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7)
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10)
/*******************************************
RGXFWDebugSetFWLog
} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE;
/*******************************************
- RGXFWDebugSetOSidPriority
+ RGXFWDebugSetDriverPriority
*******************************************/
-/* Bridge in structure for RGXFWDebugSetOSidPriority */
-typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG
+/* Bridge in structure for RGXFWDebugSetDriverPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY_TAG
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
IMG_UINT32 ui32Priority;
-} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY;
-/* Bridge out structure for RGXFWDebugSetOSidPriority */
-typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG
+/* Bridge out structure for RGXFWDebugSetDriverPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY_TAG
{
PVRSRV_ERROR eError;
-} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY;
+
+/*******************************************
+ RGXFWDebugSetDriverIsolationGroup
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetDriverIsolationGroup */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG
+{
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32IsolationGroup;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP;
+
+/* Bridge out structure for RGXFWDebugSetDriverIsolationGroup */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP;
/*******************************************
RGXFWDebugSetOSNewOnlineState
/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */
typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
{
+ IMG_UINT32 ui32DriverID;
IMG_UINT32 ui32OSNewState;
- IMG_UINT32 ui32OSid;
} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE;
/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE;
+/*******************************************
+ RGXFWDebugMapGuestHeap
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugMapGuestHeap */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP_TAG
+{
+ /* NOTE(review): the doubled "ui64ui64" prefix is presumably emitted by the
+  * bridge generator from a parameter already named ui64GuestHeapBase; the
+  * dispatcher (PVRSRVBridgeRGXFWDebugMapGuestHeap) reads it under this exact
+  * name, so keep it as-is. 64-bit member is placed first, ahead of the
+  * 32-bit one, in this __packed struct. */
+ IMG_UINT64 ui64ui64GuestHeapBase;
+ IMG_UINT32 ui32DriverID;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP;
+
+/* Bridge out structure for RGXFWDebugMapGuestHeap */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP_TAG
+{
+ /* Only output is the server-side error code. */
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP;
+
/*******************************************
RGXFWDebugPHRConfigure
*******************************************/
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+/*******************************************
+ RGXFWDebugInjectFault
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugInjectFault */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT_TAG
+{
+ /* The call takes no inputs (the dispatcher marks the IN struct as an
+  * unreferenced parameter); this placeholder keeps the struct non-empty,
+  * presumably because empty structs are not portable C — TODO confirm
+  * against the bridge generator's conventions. */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT;
+
+/* Bridge out structure for RGXFWDebugInjectFault */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT_TAG
+{
+ /* Only output is the server-side error code. */
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT;
+
#endif /* COMMON_RGXFWDBG_BRIDGE_H */
}
+/* Bridge dispatcher for RGXFWDebugSetDriverPriority (this hunk renames the
+ * former OSid-based entry as part of the wider OSid -> DriverID transition
+ * visible throughout this patch). It casts the raw IN/OUT byte buffers to
+ * the bridge structs and forwards ui32DriverID and ui32Priority to
+ * PVRSRVRGXFWDebugSetDriverPriorityKM; the KM result is reported back to the
+ * caller through the OUT struct's eError field. Always returns 0 to the
+ * dispatch table.
+ */
static IMG_INT
-PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
- IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8,
- IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8,
- CONNECTION_DATA * psConnection)
+PVRSRVBridgeRGXFWDebugSetDriverPriority(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityIN_UI8,
+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityOUT_UI8,
+ CONNECTION_DATA * psConnection)
{
- PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN =
- (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *)
- IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0);
- PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT =
- (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *)
- IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0);
-
- psRGXFWDebugSetOSidPriorityOUT->eError =
- PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection),
- psRGXFWDebugSetOSidPriorityIN->ui32OSid,
- psRGXFWDebugSetOSidPriorityIN->ui32Priority);
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityOUT_UI8, 0);
+
+ /* Forward directly to the KM implementation; no handles to look up. */
+ psRGXFWDebugSetDriverPriorityOUT->eError =
+ PVRSRVRGXFWDebugSetDriverPriorityKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetDriverPriorityIN->ui32DriverID,
+ psRGXFWDebugSetDriverPriorityIN->ui32Priority);
+
+ return 0;
+}
+
+/* Bridge dispatcher for the new RGXFWDebugSetDriverIsolationGroup call.
+ * Casts the raw IN/OUT byte buffers to the bridge structs and forwards
+ * ui32DriverID and ui32IsolationGroup to
+ * PVRSRVRGXFWDebugSetDriverIsolationGroupKM; the KM result is returned via
+ * the OUT struct's eError field. Always returns 0 to the dispatch table.
+ */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetDriverIsolationGroupIN_UI8,
+ IMG_UINT8 *
+ psRGXFWDebugSetDriverIsolationGroupOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupOUT
+ =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupOUT_UI8, 0);
+
+ /* Forward directly to the KM implementation; no handles to look up. */
+ psRGXFWDebugSetDriverIsolationGroupOUT->eError =
+ PVRSRVRGXFWDebugSetDriverIsolationGroupKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetDriverIsolationGroupIN->
+ ui32DriverID,
+ psRGXFWDebugSetDriverIsolationGroupIN->
+ ui32IsolationGroup);
 return 0;
}
psRGXFWDebugSetOSNewOnlineStateOUT->eError =
PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection),
- psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid,
+ psRGXFWDebugSetOSNewOnlineStateIN->ui32DriverID,
psRGXFWDebugSetOSNewOnlineStateIN->
ui32OSNewState);
return 0;
}
+/* Bridge dispatcher for the new RGXFWDebugMapGuestHeap call.
+ * Casts the raw IN/OUT byte buffers to the bridge structs and forwards
+ * ui32DriverID and the guest heap base address to
+ * PVRSRVRGXFWDebugMapGuestHeapKM (note the argument order differs from the
+ * struct member order). The KM result is returned via the OUT struct's
+ * eError field. Always returns 0 to the dispatch table.
+ */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugMapGuestHeap(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugMapGuestHeapIN_UI8,
+ IMG_UINT8 * psRGXFWDebugMapGuestHeapOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapOUT_UI8, 0);
+
+ /* Forward directly to the KM implementation; no handles to look up. */
+ psRGXFWDebugMapGuestHeapOUT->eError =
+ PVRSRVRGXFWDebugMapGuestHeapKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugMapGuestHeapIN->ui32DriverID,
+ psRGXFWDebugMapGuestHeapIN->ui64ui64GuestHeapBase);
+
+ return 0;
+}
+
static IMG_INT
PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8,
return 0;
}
+/* RGXFWDebugInjectFault is only available in validation builds; in all other
+ * builds its dispatch-table entry is registered as NULL via the #else below.
+ */
+#if defined(SUPPORT_VALIDATION)
+
+/* Bridge dispatcher for RGXFWDebugInjectFault. The call carries no input
+ * payload (the IN struct holds only a placeholder and is explicitly marked
+ * unreferenced); it simply invokes PVRSRVRGXFWDebugInjectFaultKM and
+ * reports the result via the OUT struct's eError field.
+ */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugInjectFault(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugInjectFaultIN_UI8,
+ IMG_UINT8 * psRGXFWDebugInjectFaultOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *)
+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *)
+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugInjectFaultIN);
+
+ psRGXFWDebugInjectFaultOUT->eError =
+ PVRSRVRGXFWDebugInjectFaultKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXFWDebugInjectFault NULL
+#endif
+
/* ***************************************************************************
* Server bridge dispatch related glue
*/
PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
- PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY,
- PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL);
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY,
+ PVRSRVBridgeRGXFWDebugSetDriverPriority, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP,
+ PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE,
PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP,
+ PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL);
+
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE,
PVRSRVBridgeRGXFWDebugPHRConfigure, NULL);
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME,
PVRSRVBridgeRGXCurrentTime, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT,
+ PVRSRVBridgeRGXFWDebugInjectFault, NULL);
+
return PVRSRV_OK;
}
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
- PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY);
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP);
+
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT);
+
}
#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4
#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5
-#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5)
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+8)
/*******************************************
RGXCtrlHWPerf
typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
{
IMG_UINT64 ui64Mask;
- IMG_BOOL bToggle;
IMG_UINT32 ui32StreamId;
+ IMG_BOOL bToggle;
} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
/* Bridge out structure for RGXCtrlHWPerf */
typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG
{
IMG_UINT16 *pui16BlockIDs;
- IMG_BOOL bEnable;
IMG_UINT32 ui32ArrayLen;
+ IMG_BOOL bEnable;
} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS;
/* Bridge out structure for RGXControlHWPerfBlocks */
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS;
+/*******************************************
+ RGXGetConfiguredHWPerfMuxCounters
+ *******************************************/
+
+/* Bridge in structure for RGXGetConfiguredHWPerfMuxCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters;
+ IMG_UINT32 ui32BlockID;
+} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS;
+
+/* Bridge out structure for RGXGetConfiguredHWPerfMuxCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS;
+
+/*******************************************
+ RGXGetConfiguredHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXGetConfiguredHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters;
+ IMG_UINT32 ui32BlockID;
+} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXGetConfiguredHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS;
+
+/*******************************************
+ RGXGetEnabledHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXGetEnabledHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG
+{
+ IMG_UINT32 *pui32EnabledBlockIDs;
+ IMG_UINT32 ui32ArrayLen;
+} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS;
+
+/* Bridge out structure for RGXGetEnabledHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG
+{
+ IMG_UINT32 *pui32EnabledBlockIDs;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BlockCount;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS;
+
#endif /* COMMON_RGXHWPERF_BRIDGE_H */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Generator-emitted guard: the array length for the single returned
+ * RGX_HWPERF_CONFIG_MUX_CNTBLK element is the constant 1, so this
+ * static_assert is trivially true but kept for uniformity with
+ * variable-length bridge calls. */
+static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX");
+
+/* Bridge dispatcher for RGXGetConfiguredHWPerfMuxCounters.
+ * Stages exactly one RGX_HWPERF_CONFIG_MUX_CNTBLK in a kernel-side scratch
+ * buffer (reusing the tail of the bridge input buffer when it is large
+ * enough, otherwise allocating), calls
+ * PVRSRVRGXGetConfiguredHWPerfMuxCountersKM for the requested ui32BlockID,
+ * and on success copies the result back to the user-supplied pointer echoed
+ * in the OUT struct. Always returns 0; failures are reported via eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetConfiguredHWPerfMuxCountersIN_UI8,
+ IMG_UINT8 *
+ psRGXGetConfiguredHWPerfMuxCountersOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *psRGXGetConfiguredHWPerfMuxCountersIN =
+ (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfMuxCountersIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *psRGXGetConfiguredHWPerfMuxCountersOUT
+ =
+ (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFMUXCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfMuxCountersOUT_UI8, 0);
+
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCountersInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0;
+
+ /* Echo the user's destination pointer into the OUT struct; it is the
+  * copy-to-user target after a successful KM call. */
+ psRGXGetConfiguredHWPerfMuxCountersOUT->psConfiguredMuxCounters =
+ psRGXGetConfiguredHWPerfMuxCountersIN->psConfiguredMuxCounters;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError =
+ PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXGetConfiguredHWPerfMuxCounters_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfMuxCountersIN),
+ sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfMuxCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXGetConfiguredHWPerfMuxCounters_exit;
+ }
+ }
+ }
+
+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+ {
+ psConfiguredMuxCountersInt =
+ (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer,
+ ui32NextOffset);
+ ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK);
+ }
+
+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError =
+ PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(psConnection, OSGetDevNode(psConnection),
+ psRGXGetConfiguredHWPerfMuxCountersIN->
+ ui32BlockID, psConfiguredMuxCountersInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXGetConfiguredHWPerfMuxCountersOUT->eError != PVRSRV_OK))
+ {
+ goto RGXGetConfiguredHWPerfMuxCounters_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((psConfiguredMuxCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL,
+ (void __user *)psRGXGetConfiguredHWPerfMuxCountersOUT->
+ psConfiguredMuxCounters, psConfiguredMuxCountersInt,
+ (1 * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK))) != PVRSRV_OK))
+ {
+ psRGXGetConfiguredHWPerfMuxCountersOUT->eError =
+ PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXGetConfiguredHWPerfMuxCounters_exit;
+ }
+ }
+
+RGXGetConfiguredHWPerfMuxCounters_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXGetConfiguredHWPerfMuxCountersOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Free the scratch buffer only when it was heap-allocated (i.e. the
+  * in-buffer tail was not large enough). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Generator-emitted guard: the array length for the single returned
+ * RGX_HWPERF_CONFIG_CNTBLK element is the constant 1 (trivially true,
+ * kept for uniformity with variable-length bridge calls). */
+static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX");
+
+/* Bridge dispatcher for RGXGetConfiguredHWPerfCounters — the non-mux
+ * counterpart of RGXGetConfiguredHWPerfMuxCounters above. Stages one
+ * RGX_HWPERF_CONFIG_CNTBLK in a kernel-side scratch buffer (in-buffer tail
+ * when large enough, otherwise a heap allocation), calls
+ * PVRSRVRGXGetConfiguredHWPerfCountersKM for ui32BlockID, and on success
+ * copies the result to the user pointer echoed in the OUT struct.
+ * Always returns 0; failures are reported via eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8,
+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN =
+ (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0);
+
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0;
+
+ /* Echo the user's destination pointer into the OUT struct; it is the
+  * copy-to-user target after a successful KM call. */
+ psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters =
+ psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+ {
+ psConfiguredCountersInt =
+ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+ }
+
+ psRGXGetConfiguredHWPerfCountersOUT->eError =
+ PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection),
+ psRGXGetConfiguredHWPerfCountersIN->ui32BlockID,
+ psConfiguredCountersInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK))
+ {
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL,
+ (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters,
+ psConfiguredCountersInt,
+ (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK))
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+ }
+
+RGXGetConfiguredHWPerfCounters_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Free the scratch buffer only when it was heap-allocated (i.e. the
+  * in-buffer tail was not large enough). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8,
+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN =
+ (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0);
+
+ IMG_UINT32 *pui32EnabledBlockIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0;
+
+ psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs =
+ psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+ }
+ }
+
+ if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0)
+ {
+ pui32EnabledBlockIDsInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32);
+ }
+
+ psRGXGetEnabledHWPerfBlocksOUT->eError =
+ PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+ psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen,
+ &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount,
+ pui32EnabledBlockIDsInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK))
+ {
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((pui32EnabledBlockIDsInt) &&
+ ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs,
+ pui32EnabledBlockIDsInt,
+ (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) !=
+ PVRSRV_OK))
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+ }
+
+RGXGetEnabledHWPerfBlocks_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS,
PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS,
+ PVRSRVBridgeRGXGetConfiguredHWPerfMuxCounters, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS,
+ PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS,
+ PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL);
+
return PVRSRV_OK;
}
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFMUXCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS);
+
}
#include <linux/slab.h>
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
+
/* ***************************************************************************
* Server-side bridge entry points
*/
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psUpdateUFODevVarBlockInt[i])
+ if (psUpdateUFODevVarBlockInt && psUpdateUFODevVarBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hUpdateUFODevVarBlockInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
* Server bridge dispatch related glue
*/
+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
+
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
void DeinitRGXKICKSYNCBridge(void);
PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY);
}
+#else /* SUPPORT_RGXKICKSYNC_BRIDGE */
+/* This bridge is conditional on SUPPORT_RGXKICKSYNC_BRIDGE - when not defined,
+ * do not populate the dispatch table with its functions
+ */
+/* Stub expands to PVRSRV_OK so callers that check the Init return value
+ * still see success when the bridge is compiled out.
+ */
+#define InitRGXKICKSYNCBridge() \
+	PVRSRV_OK
+
+/* Deinit stub expands to nothing - there is no table entry to remove. */
+#define DeinitRGXKICKSYNCBridge()
+
+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
IMG_HANDLE hMemCtxPrivData;
IMG_HANDLE hsFreeListPMR;
IMG_HANDLE hsGlobalFreeList;
- IMG_BOOL bbFreeListCheck;
IMG_UINT32 ui32GrowFLPages;
IMG_UINT32 ui32GrowParamThreshold;
IMG_UINT32 ui32InitFLPages;
IMG_UINT32 ui32MaxFLPages;
+ IMG_BOOL bbFreeListCheck;
} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
/* Bridge out structure for RGXCreateFreeList */
IMG_HANDLE hPrivData;
IMG_BYTE *pui8FrameworkCmd;
IMG_BYTE *pui8StaticRenderContextState;
+ IMG_INT32 i32Priority;
IMG_UINT32 ui32ContextFlags;
IMG_UINT32 ui32FrameworkCmdSize;
IMG_UINT32 ui32Max3DDeadlineMS;
IMG_UINT32 ui32MaxTADeadlineMS;
IMG_UINT32 ui32PackedCCBSizeU8888;
- IMG_UINT32 ui32Priority;
IMG_UINT32 ui32StaticRenderContextStateSize;
IMG_UINT32 ui32ui32CallStackDepth;
} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
{
IMG_HANDLE hRenderContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
/* Bridge out structure for RGXSetRenderContextPriority */
IMG_HANDLE *phClientTAFenceSyncPrimBlock;
IMG_HANDLE *phClientTAUpdateSyncPrimBlock;
IMG_HANDLE *phSyncPMRs;
- IMG_BOOL bbAbort;
- IMG_BOOL bbKick3D;
- IMG_BOOL bbKickPR;
- IMG_BOOL bbKickTA;
PVRSRV_FENCE hCheckFence;
PVRSRV_FENCE hCheckFence3D;
PVRSRV_TIMELINE hUpdateTimeline;
IMG_UINT32 ui32RenderTargetSize;
IMG_UINT32 ui32SyncPMRCount;
IMG_UINT32 ui32TACmdSize;
+ IMG_BOOL bbAbort;
+ IMG_BOOL bbKick3D;
+ IMG_BOOL bbKickPR;
+ IMG_BOOL bbKickTA;
} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2;
/* Bridge out structure for RGXKickTA3D2 */
"RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX,
"RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX,
+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
static IMG_INT
PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psapsFreeListsInt[i])
+ if (psapsFreeListsInt && psapsFreeListsInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hapsFreeListsInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
psRGXCreateRenderContextOUT->eError =
PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection),
- psRGXCreateRenderContextIN->ui32Priority,
+ psRGXCreateRenderContextIN->i32Priority,
psRGXCreateRenderContextIN->sVDMCallStackAddr,
psRGXCreateRenderContextIN->ui32ui32CallStackDepth,
psRGXCreateRenderContextIN->ui32FrameworkCmdSize,
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
psRGXSetRenderContextPriorityOUT->eError =
PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection),
psRenderContextInt,
- psRGXSetRenderContextPriorityIN->ui32Priority);
+ psRGXSetRenderContextPriorityIN->i32Priority);
RGXSetRenderContextPriority_exit:
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psClientTAFenceSyncPrimBlockInt[i])
+ if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hClientTAFenceSyncPrimBlockInt2[i],
{
/* Unreference the previously looked up handle */
- if (psClientTAUpdateSyncPrimBlockInt[i])
+ if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hClientTAUpdateSyncPrimBlockInt2[i],
{
/* Unreference the previously looked up handle */
- if (psClient3DUpdateSyncPrimBlockInt[i])
+ if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hClient3DUpdateSyncPrimBlockInt2[i],
{
/* Unreference the previously looked up handle */
- if (psSyncPMRsInt[i])
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hSyncPMRsInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT64 ui64RobustnessAddress;
IMG_HANDLE hPrivData;
IMG_BYTE *pui8FrameworkCmd;
+ IMG_INT32 i32Priority;
IMG_UINT32 ui32ContextFlags;
IMG_UINT32 ui32FrameworkCmdSize;
IMG_UINT32 ui32PackedCCBSizeU88;
- IMG_UINT32 ui32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
/* Bridge out structure for RGXTDMCreateTransferContext */
typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
{
IMG_HANDLE hTransferContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
/* Bridge out structure for RGXTDMSetTransferContextPriority */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
psRGXTDMCreateTransferContextOUT->eError =
PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection),
- psRGXTDMCreateTransferContextIN->ui32Priority,
+ psRGXTDMCreateTransferContextIN->i32Priority,
psRGXTDMCreateTransferContextIN->
ui32FrameworkCmdSize, ui8FrameworkCmdInt,
hPrivDataInt,
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
psTransferContextInt,
psRGXTDMSetTransferContextPriorityIN->
- ui32Priority);
+ i32Priority);
RGXTDMSetTransferContextPriority_exit:
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
{
/* Unreference the previously looked up handle */
- if (psUpdateUFOSyncPrimBlockInt[i])
+ if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hUpdateUFOSyncPrimBlockInt2[i],
{
/* Unreference the previously looked up handle */
- if (psSyncPMRsInt[i])
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hSyncPMRsInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
-#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4
-#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4)
+#define PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+6)
/*******************************************
RGXCreateTransferContext
IMG_UINT64 ui64RobustnessAddress;
IMG_HANDLE hPrivData;
IMG_BYTE *pui8FrameworkCmd;
+ IMG_INT32 i32Priority;
IMG_UINT32 ui32ContextFlags;
IMG_UINT32 ui32FrameworkCmdize;
IMG_UINT32 ui32PackedCCBSizeU8888;
- IMG_UINT32 ui32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
/* Bridge out structure for RGXCreateTransferContext */
typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
{
- IMG_HANDLE hCLIPMRMem;
IMG_HANDLE hTransferContext;
- IMG_HANDLE hUSCPMRMem;
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
{
IMG_HANDLE hTransferContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
/* Bridge out structure for RGXSetTransferContextPriority */
PVRSRV_FENCE h3DUpdateFence;
} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2;
+/*******************************************
+ RGXTQGetSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTQGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY_TAG
+{
+	/* The call takes no inputs; this placeholder keeps the struct
+	 * non-empty (and its size non-zero) for the bridge marshalling code. */
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY;
+
+/* Bridge out structure for RGXTQGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY_TAG
+{
+	/* Per-connection PMR local-export handles for the CLI and USC
+	 * shared buffers; released via RGXTQReleaseSharedMemory. */
+	IMG_HANDLE hCLIPMRMem;
+	IMG_HANDLE hUSCPMRMem;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY;
+
+/*******************************************
+ RGXTQReleaseSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTQReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY_TAG
+{
+	/* One of the handles previously returned by RGXTQGetSharedMemory. */
+	IMG_HANDLE hPMRMem;
+} __packed PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY;
+
+/* Bridge out structure for RGXTQReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY;
+
/*******************************************
RGXSetTransferContextProperty
*******************************************/
IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
IMG_HANDLE hPrivDataInt = NULL;
RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
- PMR *psCLIPMRMemInt = NULL;
- PMR *psUSCPMRMemInt = NULL;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
goto RGXCreateTransferContext_exit;
}
- psRGXCreateTransferContextOUT->hTransferContext = NULL;
-
if (ui64BufferSize > IMG_UINT32_MAX)
{
psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
psRGXCreateTransferContextOUT->eError =
PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevNode(psConnection),
- psRGXCreateTransferContextIN->ui32Priority,
+ psRGXCreateTransferContextIN->i32Priority,
psRGXCreateTransferContextIN->ui32FrameworkCmdize,
ui8FrameworkCmdInt,
hPrivDataInt,
psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888,
psRGXCreateTransferContextIN->ui32ContextFlags,
psRGXCreateTransferContextIN->ui64RobustnessAddress,
- &psTransferContextInt,
- &psCLIPMRMemInt, &psUSCPMRMemInt);
+ &psTransferContextInt);
/* Exit early if bridged call fails */
if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
{
goto RGXCreateTransferContext_exit;
}
- psRGXCreateTransferContextOUT->eError =
- PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
- &psRGXCreateTransferContextOUT->hCLIPMRMem,
- (void *)psCLIPMRMemInt,
- PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psRGXCreateTransferContextOUT->hTransferContext);
- if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto RGXCreateTransferContext_exit;
- }
-
- psRGXCreateTransferContextOUT->eError =
- PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
- &psRGXCreateTransferContextOUT->hUSCPMRMem,
- (void *)psUSCPMRMemInt,
- PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
- PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
- psRGXCreateTransferContextOUT->hTransferContext);
- if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK))
- {
- UnlockHandle(psConnection->psHandleBase);
- goto RGXCreateTransferContext_exit;
- }
-
/* Release now we have created handles. */
UnlockHandle(psConnection->psHandleBase);
if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
{
- if (psRGXCreateTransferContextOUT->hTransferContext)
- {
- PVRSRV_ERROR eError;
-
- /* Lock over handle creation cleanup. */
- LockHandle(psConnection->psHandleBase);
-
- eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
- (IMG_HANDLE)
- psRGXCreateTransferContextOUT->
- hTransferContext,
- PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
- if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: %s", __func__, PVRSRVGetErrorString(eError)));
- }
- /* Releasing the handle should free/destroy/release the resource.
- * This should never fail... */
- PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
-
- /* Avoid freeing/destroying/releasing the resource a second time below */
- psTransferContextInt = NULL;
- /* Release now we have cleaned up creation handles. */
- UnlockHandle(psConnection->psHandleBase);
-
- }
-
if (psTransferContextInt)
{
PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
psRGXSetTransferContextPriorityOUT->eError =
PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
psTransferContextInt,
- psRGXSetTransferContextPriorityIN->ui32Priority);
+ psRGXSetTransferContextPriorityIN->i32Priority);
RGXSetTransferContextPriority_exit:
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
IMG_BYTE *pArrayArgsBuffer2 = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
{
ui64BufferSize +=
- psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
- ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **);
- ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
- ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *);
- ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *);
+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK **));
+ ui64BufferSize +=
+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **));
+ ui64BufferSize +=
+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *));
+ ui64BufferSize +=
+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *));
+ ui64BufferSize +=
+ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *));
}
if (unlikely(psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++)
{
ui64BufferSize +=
- ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
- ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
- ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
- ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
- ui64BufferSize += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui64BufferSize +=
+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *));
+ ui64BufferSize +=
+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32));
+ ui64BufferSize +=
+ ((IMG_UINT64) ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32));
+ ui64BufferSize += ((IMG_UINT64) ui32CommandSizeInt[i] * sizeof(IMG_UINT8));
}
if (ui64BufferSize > IMG_UINT32_MAX)
{
{
/* Unreference the previously looked up handle */
- if (psUpdateUFOSyncPrimBlockInt[i][j])
+ if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i]
+ && psUpdateUFOSyncPrimBlockInt[i][j])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hUpdateUFOSyncPrimBlockInt2[i]
{
/* Unreference the previously looked up handle */
- if (psSyncPMRsInt[i])
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
{
PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
hSyncPMRsInt2[i],
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
/* Allocated space should be equal to the last updated offset */
return 0;
}
+/* Handle-release callback (PFN_HANDLE_RELEASE) for the hCLIPMRMem handle
+ * created by PVRSRVBridgeRGXTQGetSharedMemory: forwards the wrapped PMR
+ * to the KM release entry point when the handle is destroyed.
+ */
+static PVRSRV_ERROR _RGXTQGetSharedMemorypsCLIPMRMemIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = PVRSRVRGXTQReleaseSharedMemoryKM((PMR *) pvData);
+	return eError;
+}
+
+/* Handle-release callback (PFN_HANDLE_RELEASE) for the hUSCPMRMem handle
+ * created by PVRSRVBridgeRGXTQGetSharedMemory: forwards the wrapped PMR
+ * to the KM release entry point when the handle is destroyed.
+ */
+static PVRSRV_ERROR _RGXTQGetSharedMemorypsUSCPMRMemIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = PVRSRVRGXTQReleaseSharedMemoryKM((PMR *) pvData);
+	return eError;
+}
+
+/* Bridge dispatcher for RGXTQGetSharedMemory.
+ *
+ * Fetches the CLI and USC shared PMRs from the server via
+ * PVRSRVRGXTQGetSharedMemoryKM and wraps each in a per-connection
+ * PMR_LOCAL_EXPORT handle whose release callback returns the PMR to the KM.
+ * Always returns 0; the PVRSRV_ERROR status is carried in
+ * psRGXTQGetSharedMemoryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXTQGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psRGXTQGetSharedMemoryIN_UI8,
+				 IMG_UINT8 * psRGXTQGetSharedMemoryOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryIN =
+	    (PVRSRV_BRIDGE_IN_RGXTQGETSHAREDMEMORY *) IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *psRGXTQGetSharedMemoryOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXTQGETSHAREDMEMORY *)
+	    IMG_OFFSET_ADDR(psRGXTQGetSharedMemoryOUT_UI8, 0);
+
+	PMR *psCLIPMRMemInt = NULL;
+	PMR *psUSCPMRMemInt = NULL;
+
+	/* The IN struct is only a placeholder for this call (no inputs). */
+	PVR_UNREFERENCED_PARAMETER(psRGXTQGetSharedMemoryIN);
+
+	psRGXTQGetSharedMemoryOUT->eError =
+	    PVRSRVRGXTQGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection),
+					 &psCLIPMRMemInt, &psUSCPMRMemInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK))
+	{
+		goto RGXTQGetSharedMemory_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXTQGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								      &psRGXTQGetSharedMemoryOUT->
+								      hCLIPMRMem,
+								      (void *)psCLIPMRMemInt,
+								      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+								      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								      (PFN_HANDLE_RELEASE) &
+								      _RGXTQGetSharedMemorypsCLIPMRMemIntRelease);
+	if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTQGetSharedMemory_exit;
+	}
+
+	psRGXTQGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								      &psRGXTQGetSharedMemoryOUT->
+								      hUSCPMRMem,
+								      (void *)psUSCPMRMemInt,
+								      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+								      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								      (PFN_HANDLE_RELEASE) &
+								      _RGXTQGetSharedMemorypsUSCPMRMemIntRelease);
+	if (unlikely(psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTQGetSharedMemory_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXTQGetSharedMemory_exit:
+
+	/* On failure, drop the KM references obtained above.
+	 * NOTE(review): if only the second handle allocation failed, the CLI
+	 * handle already owns psCLIPMRMemInt via its release callback, yet it
+	 * is released again here - confirm the handle/release semantics avoid
+	 * a double release in that path.
+	 */
+	if (psRGXTQGetSharedMemoryOUT->eError != PVRSRV_OK)
+	{
+		if (psCLIPMRMemInt)
+		{
+			PVRSRVRGXTQReleaseSharedMemoryKM(psCLIPMRMemInt);
+		}
+		if (psUSCPMRMemInt)
+		{
+			PVRSRVRGXTQReleaseSharedMemoryKM(psUSCPMRMemInt);
+		}
+	}
+
+	return 0;
+}
+
+/* Bridge dispatcher for RGXTQReleaseSharedMemory.
+ *
+ * Destroys the caller-supplied PMR_LOCAL_EXPORT handle (staged destroy);
+ * the handle's release callback performs the actual KM release of the PMR.
+ * Always returns 0; the PVRSRV_ERROR status is carried in
+ * psRGXTQReleaseSharedMemoryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXTQReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psRGXTQReleaseSharedMemoryIN_UI8,
+				     IMG_UINT8 * psRGXTQReleaseSharedMemoryOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryIN =
+	    (PVRSRV_BRIDGE_IN_RGXTQRELEASESHAREDMEMORY *)
+	    IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *psRGXTQReleaseSharedMemoryOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXTQRELEASESHAREDMEMORY *)
+	    IMG_OFFSET_ADDR(psRGXTQReleaseSharedMemoryOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXTQReleaseSharedMemoryOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psRGXTQReleaseSharedMemoryIN->hPMRMem,
+					      PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+	/* KERNEL_CCB_FULL and RETRY are treated as retryable, not logged as errors. */
+	if (unlikely((psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_OK) &&
+		     (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psRGXTQReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psRGXTQReleaseSharedMemoryOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXTQReleaseSharedMemory_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXTQReleaseSharedMemory_exit:
+
+	return 0;
+}
+
static IMG_INT
PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8,
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2,
PVRSRVBridgeRGXSubmitTransfer2, NULL);
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY,
+ PVRSRVBridgeRGXTQGetSharedMemory, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY,
+ PVRSRVBridgeRGXTQReleaseSharedMemory, NULL);
+
SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY,
PVRSRVBridgeRGXSetTransferContextProperty, NULL);
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2);
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQGETSHAREDMEMORY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXTQRELEASESHAREDMEMORY);
+
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ,
PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY);
{
PVRSRV_ERROR eError;
RI_HANDLE psRIHandleInt = NULL;
- PVR_UNREFERENCED_PARAMETER(hBridge);
eError =
- RIWriteProcListEntryKM(ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt);
+ RIWriteProcListEntryKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt);
*phRIHandle = psRIHandleInt;
return eError;
IMG_UINT64 ui64Size;
IMG_HANDLE hPMRHandle;
const IMG_CHAR *puiTextB;
+ IMG_UINT32 ui32TextBSize;
IMG_BOOL bIsImport;
IMG_BOOL bIsSuballoc;
- IMG_UINT32 ui32TextBSize;
} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
/* Bridge out structure for RIWriteMEMDESCEntry */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
}
psRIWriteProcListEntryOUT->eError =
- RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize,
+ RIWriteProcListEntryKM(psConnection, OSGetDevNode(psConnection),
+ psRIWriteProcListEntryIN->ui32TextBSize,
uiTextBInt,
psRIWriteProcListEntryIN->ui64Size,
psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
/* Bridge in structure for FindProcessMemStats */
typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
{
- IMG_UINT32 *pui32MemStatsArray;
- IMG_BOOL bbAllProcessStats;
+ IMG_UINT64 *pui64MemStatsArray;
IMG_UINT32 ui32ArrSize;
IMG_UINT32 ui32PID;
+ IMG_BOOL bbAllProcessStats;
} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
/* Bridge out structure for FindProcessMemStats */
typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
{
- IMG_UINT32 *pui32MemStatsArray;
+ IMG_UINT64 *pui64MemStatsArray;
PVRSRV_ERROR eError;
} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
return 0;
}
+static_assert(8 <= IMG_UINT32_MAX, "8 must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psGetMultiCoreInfoIN_UI8,
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
return 0;
}
+static_assert(PVRSRV_PROCESS_STAT_TYPE_COUNT <= IMG_UINT32_MAX,
+ "PVRSRV_PROCESS_STAT_TYPE_COUNT must not be larger than IMG_UINT32_MAX");
+
static IMG_INT
PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
IMG_UINT8 * psFindProcessMemStatsIN_UI8,
(PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8,
0);
- IMG_UINT32 *pui32MemStatsArrayInt = NULL;
+ IMG_UINT64 *pui64MemStatsArrayInt = NULL;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
- ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0;
+ ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) + 0;
if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT)
{
PVR_UNREFERENCED_PARAMETER(psConnection);
- psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray;
+ psFindProcessMemStatsOUT->pui64MemStatsArray = psFindProcessMemStatsIN->pui64MemStatsArray;
if (ui64BufferSize > IMG_UINT32_MAX)
{
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
if (psFindProcessMemStatsIN->ui32ArrSize != 0)
{
- pui32MemStatsArrayInt =
- (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
- ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
+ pui64MemStatsArrayInt =
+ (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64);
}
psFindProcessMemStatsOUT->eError =
PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID,
psFindProcessMemStatsIN->ui32ArrSize,
psFindProcessMemStatsIN->bbAllProcessStats,
- pui32MemStatsArrayInt);
+ pui64MemStatsArrayInt);
/* Exit early if bridged call fails */
if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK))
{
}
/* If dest ptr is non-null and we have data to copy */
- if ((pui32MemStatsArrayInt) &&
- ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0))
+ if ((pui64MemStatsArrayInt) &&
+ ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) > 0))
{
if (unlikely
(OSCopyToUser
- (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray,
- pui32MemStatsArrayInt,
- (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK))
+ (NULL, (void __user *)psFindProcessMemStatsOUT->pui64MemStatsArray,
+ pui64MemStatsArrayInt,
+ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64))) != PVRSRV_OK))
{
psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
{
const IMG_CHAR *puiClassName;
- IMG_BOOL bServerSync;
IMG_UINT32 ui32ClassNameSize;
IMG_UINT32 ui32FWAddr;
+ IMG_BOOL bServerSync;
} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
/* Bridge out structure for SyncAllocEvent */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncFbTimelineCreatePVRIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncFbFenceMergeIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncFbFenceDumpIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncFbTimelineCreateSWIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncFbFenceCreateSWIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
{
IMG_HANDLE hhServerSyncPrimBlock;
const IMG_CHAR *puiClassName;
- IMG_BOOL bbServerSync;
IMG_UINT32 ui32ClassNameSize;
IMG_UINT32 ui32ui32FwBlockAddr;
IMG_UINT32 ui32ui32SyncOffset;
+ IMG_BOOL bbServerSync;
} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD;
/* Bridge out structure for SyncRecordAdd */
IMG_UINT32 ui32NextOffset = 0;
IMG_BYTE *pArrayArgsBuffer = NULL;
-#if !defined(INTEGRITY_OS)
IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
-#endif
IMG_UINT32 ui32BufferSize = 0;
IMG_UINT64 ui64BufferSize =
if (ui32BufferSize != 0)
{
-#if !defined(INTEGRITY_OS)
/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
IMG_UINT32 ui32InBufferOffset =
PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
}
else
-#endif
{
pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
PVR_ASSERT(ui32BufferSize == ui32NextOffset);
#endif /* PVRSRV_NEED_PVR_ASSERT */
-#if defined(INTEGRITY_OS)
- if (pArrayArgsBuffer)
-#else
if (!bHaveEnoughSpace && pArrayArgsBuffer)
-#endif
OSFreeMemNoStats(pArrayArgsBuffer);
return 0;
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_HANDLE * phPMR,
+ IMG_UINT64 * pui64Address,
+ IMG_DEVMEM_OFFSET_T * puiOffset,
+ IMG_DEVMEM_SIZE_T * puiSize,
+ PVRSRV_CACHE_OP * piuCacheOp,
+ IMG_UINT32 ui32OpTimeline);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_INT64 i64StartTime,
+ IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp);
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for cache
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+/* Direct (in-kernel caller) client entry for CacheOpQueue.
+ * Unlike the ioctl path there is no user-copy or handle lookup here:
+ * hBridge is reinterpreted as the PVRSRV_DEVICE_NODE and phPMR is
+ * already an array of ui32NumCacheOps kernel PMR pointers.
+ * Returns the PVRSRV_ERROR result of CacheOpQueue() unchanged. */
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge,
+					     IMG_UINT32 ui32NumCacheOps,
+					     IMG_HANDLE * phPMR,
+					     IMG_UINT64 * pui64Address,
+					     IMG_DEVMEM_OFFSET_T * puiOffset,
+					     IMG_DEVMEM_SIZE_T * puiSize,
+					     PVRSRV_CACHE_OP * piuCacheOp,
+					     IMG_UINT32 ui32OpTimeline)
+{
+	PVRSRV_ERROR eError;
+	PMR **psPMRInt;
+
+	/* Handles from a direct caller are the PMR pointers themselves. */
+	psPMRInt = (PMR **) phPMR;
+
+	eError =
+	    CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+			 ui32NumCacheOps,
+			 psPMRInt, pui64Address, puiOffset, puiSize, piuCacheOp, ui32OpTimeline);
+
+	return eError;
+}
+
+/* Direct client entry for CacheOpExec: performs a single synchronous
+ * cache maintenance operation on one PMR via CacheOpValExec().
+ * hBridge is unused on this path (the PMR handle is the PMR pointer). */
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge,
+					    IMG_HANDLE hPMR,
+					    IMG_UINT64 ui64Address,
+					    IMG_DEVMEM_OFFSET_T uiOffset,
+					    IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError = CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp);
+
+	return eError;
+}
+
+/* Direct client entry for CacheOpLog: records a cache operation (with its
+ * start/end timestamps) via CacheOpLog() without executing it.
+ * hBridge is unused on this path (the PMR handle is the PMR pointer). */
+IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge,
+					   IMG_HANDLE hPMR,
+					   IMG_UINT64 ui64Address,
+					   IMG_DEVMEM_OFFSET_T uiOffset,
+					   IMG_DEVMEM_SIZE_T uiSize,
+					   IMG_INT64 i64StartTime,
+					   IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    CacheOpLog(psPMRInt,
+		       ui64Address, uiOffset, uiSize, i64StartTime, i64EndTime, iuCacheOp);
+
+	return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2)
+
+/*******************************************
+ CacheOpQueue
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+	/* All pointer members are user-space array addresses of
+	 * ui32NumCacheOps elements each; the server side copies them in
+	 * with OSCopyFromUser before use. */
+	PVRSRV_CACHE_OP *piuCacheOp;
+	IMG_UINT64 *pui64Address;
+	IMG_DEVMEM_OFFSET_T *puiOffset;
+	IMG_DEVMEM_SIZE_T *puiSize;
+	IMG_HANDLE *phPMR;
+	IMG_UINT32 ui32NumCacheOps;	/* element count; capped at CACHE_BATCH_MAX server-side */
+	IMG_UINT32 ui32OpTimeline;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+/*******************************************
+ CacheOpExec
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+	/* Single synchronous cache op: fields ordered widest-first for
+	 * minimal padding in this __packed, ABI-stable bridge struct. */
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_HANDLE hPMR;	/* handle resolved to a PMR by the server */
+	PVRSRV_CACHE_OP iuCacheOp;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+/*******************************************
+ CacheOpLog
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+	/* Log-only variant of CacheOpExec: adds the caller-measured
+	 * start/end timestamps of the operation being recorded. */
+	IMG_INT64 i64EndTime;
+	IMG_INT64 i64StartTime;
+	IMG_UINT64 ui64Address;
+	IMG_DEVMEM_OFFSET_T uiOffset;
+	IMG_DEVMEM_SIZE_T uiSize;
+	IMG_HANDLE hPMR;	/* handle resolved to a PMR by the server */
+	PVRSRV_CACHE_OP iuCacheOp;
+} __packed PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+#endif /* COMMON_CACHE_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(CACHE_BATCH_MAX <= IMG_UINT32_MAX,
+ "CACHE_BATCH_MAX must not be larger than IMG_UINT32_MAX");
+
+/* Server-side ioctl handler for CacheOpQueue.
+ * Marshalling sequence: (1) bound-check the batch size against
+ * CACHE_BATCH_MAX and the 32-bit buffer limit, (2) carve per-array
+ * scratch space out of the bridge input buffer when it fits (else heap
+ * allocate), (3) copy each user array in, (4) resolve each PMR handle
+ * under the handle lock, (5) call CacheOpQueue(), (6) release every
+ * successfully looked-up handle and free any heap scratch buffer.
+ * Always returns 0; the per-call status travels in psCacheOpQueueOUT->eError. */
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+			 IMG_UINT8 * psCacheOpQueueIN_UI8,
+			 IMG_UINT8 * psCacheOpQueueOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN =
+	    (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT =
+	    (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0);
+
+	PMR **psPMRInt = NULL;
+	IMG_HANDLE *hPMRInt2 = NULL;
+	IMG_UINT64 *ui64AddressInt = NULL;
+	IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+	IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+	PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	IMG_UINT32 ui32BufferSize = 0;
+	/* Total scratch needed for all six per-op arrays; computed in 64-bit
+	 * so an attacker-supplied ui32NumCacheOps cannot overflow the sum. */
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) +
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+	    ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0;
+
+	if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX))
+	{
+		psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto CacheOpQueue_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto CacheOpQueue_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psCacheOpQueueIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		/* Zero the PMR slots so the cleanup path can tell which
+		 * handles were successfully looked up. */
+		OSCachedMemSet(psPMRInt, 0, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *));
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+		hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hPMRInt2, (const void __user *)psCacheOpQueueIN->phPMR,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui64AddressInt, (const void __user *)psCacheOpQueueIN->pui64Address,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiOffsetInt =
+		    (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiOffsetInt, (const void __user *)psCacheOpQueueIN->puiOffset,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiSizeInt, (const void __user *)psCacheOpQueueIN->puiSize,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+	if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+	{
+		iuCacheOpInt =
+		    (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+	}
+
+	/* Copy the data over */
+	if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, iuCacheOpInt, (const void __user *)psCacheOpQueueIN->piuCacheOp,
+		     psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK)
+		{
+			psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto CacheOpQueue_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+			/* Look up the address from the handle */
+			psCacheOpQueueOUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						       (void **)&psPMRInt[i],
+						       hPMRInt2[i],
+						       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+			if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK))
+			{
+				/* Partial lookups are undone by the exit path,
+				 * which skips NULL psPMRInt[] entries. */
+				UnlockHandle(psConnection->psHandleBase);
+				goto CacheOpQueue_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpQueueOUT->eError =
+	    CacheOpQueue(psConnection, OSGetDevNode(psConnection),
+			 psCacheOpQueueIN->ui32NumCacheOps,
+			 psPMRInt,
+			 ui64AddressInt,
+			 uiOffsetInt, uiSizeInt, iuCacheOpInt, psCacheOpQueueIN->ui32OpTimeline);
+
+CacheOpQueue_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	if (hPMRInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (psPMRInt && psPMRInt[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							    hPMRInt2[i],
+							    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psCacheOpQueueOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when scratch came from the heap, not the input buffer. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Server-side ioctl handler for CacheOpExec: resolves the single PMR
+ * handle under the handle lock, runs CacheOpValExec() on it, then
+ * releases the handle reference on all paths via the exit label.
+ * Always returns 0; status is reported in psCacheOpExecOUT->eError. */
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+			IMG_UINT8 * psCacheOpExecIN_UI8,
+			IMG_UINT8 * psCacheOpExecOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN =
+	    (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT =
+	    (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0);
+
+	IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psCacheOpExecOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto CacheOpExec_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpExecOUT->eError =
+	    CacheOpValExec(psPMRInt,
+			   psCacheOpExecIN->ui64Address,
+			   psCacheOpExecIN->uiOffset,
+			   psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp);
+
+CacheOpExec_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Server-side bridge entry for CacheOpLog.
+ * Resolves the caller's PMR handle, records the cache operation together
+ * with its start/end timestamps via CacheOpLog(), then releases the
+ * handle reference. Always returns 0; errors go back in eError. */
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+		       IMG_UINT8 * psCacheOpLogIN_UI8,
+		       IMG_UINT8 * psCacheOpLogOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN =
+	    (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT =
+	    (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0);
+
+	IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psCacheOpLogOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto CacheOpLog_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psCacheOpLogOUT->eError =
+	    CacheOpLog(psPMRInt,
+		       psCacheOpLogIN->ui64Address,
+		       psCacheOpLogIN->uiOffset,
+		       psCacheOpLogIN->uiSize,
+		       psCacheOpLogIN->i64StartTime,
+		       psCacheOpLogIN->i64EndTime, psCacheOpLogIN->iuCacheOp);
+
+CacheOpLog_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitCACHEBridge(void);
+void DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+/* Populate the services dispatch table with the three CACHE bridge entry
+ * points (queue / exec / log). The final NULL argument is an optional
+ * per-entry parameter — NOTE(review): none is supplied here; confirm its
+ * semantics against SetDispatchTableEntry(). Always returns PVRSRV_OK. */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE,
+			      PVRSRVBridgeCacheOpQueue, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC,
+			      PVRSRVBridgeCacheOpExec, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG,
+			      PVRSRVBridgeCacheOpLog, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cache functions with services
+ */
+/* Remove the three CACHE bridge entry points from the dispatch table;
+ * mirror of InitCACHEBridge(). */
+void DeinitCACHEBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+/*******************************************
+   DevmemIntExportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContext;	/* client handle to the device memory context to export */
+	IMG_HANDLE hPMR;	/* client handle to the PMR associated with the export */
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;	/* handle to the newly created context export */
+	PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+/*******************************************
+   DevmemIntUnexportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+	IMG_HANDLE hContextExport;	/* export handle to destroy */
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+	PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+/*******************************************
+   DevmemIntAcquireRemoteCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hPMR;	/* client handle to the PMR identifying the remote context */
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+	IMG_HANDLE hContext;	/* handle to the acquired device memory context */
+	IMG_HANDLE hPrivData;	/* sub-handle to the context's private data */
+	PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+#endif /* COMMON_CMM_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback: tears down a context export object when its
+ * bridge handle is destroyed (registered via PVRSRVAllocHandleUnlocked). */
+static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData);
+	return eError;
+}
+
+/* Server-side bridge entry for DevmemIntExportCtx.
+ * Looks up the devmem context and PMR handles, creates a context export
+ * via DevmemIntExportCtx(), and wraps the result in a new client handle
+ * whose release callback unexports it again. On any failure after the
+ * export was created (and no handle owns it yet), the export is undone
+ * in the error path. Always returns 0; status is in eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+			       IMG_UINT8 * psDevmemIntExportCtxIN_UI8,
+			       IMG_UINT8 * psDevmemIntExportCtxOUT_UI8,
+			       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8,
+								     0);
+
+	IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psContextInt,
+				       hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemIntExportCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntExportCtxOUT->eError =
+	    DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								    &psDevmemIntExportCtxOUT->
+								    hContextExport,
+								    (void *)psContextExportInt,
+								    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+								    PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+								    (PFN_HANDLE_RELEASE) &
+								    _DevmemIntExportCtxpsContextExportIntRelease);
+	if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntExportCtx_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntExportCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Undo the export if it was created but never owned by a handle.
+	 * NOTE(review): the KERNEL_HANDLE_BASE lock scope here is taken from
+	 * the generator template — confirm it is the intended lock. */
+	if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+	{
+		if (psContextExportInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			DevmemIntUnexportCtx(psContextExportInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+/* Server-side bridge entry for DevmemIntUnexportCtx.
+ * Destroys the client's context-export handle; the handle's release
+ * callback performs the actual unexport. KERNEL_CCB_FULL and RETRY
+ * errors are passed back without logging (the client may retry).
+ * Always returns 0; status is in eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8,
+				 IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *)
+	    IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntUnexportCtxOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport,
+					      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+	if (unlikely((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) &&
+		     (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntUnexportCtx_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnexportCtx_exit:
+
+	return 0;
+}
+
+/* Handle-release callback: destroys the acquired device memory context
+ * when its bridge handle is freed. */
+static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData);
+	return eError;
+}
+
+/* Server-side bridge entry for DevmemIntAcquireRemoteCtx.
+ * Looks up the PMR handle, acquires the remote devmem context via
+ * DevmemIntAcquireRemoteCtx(), then allocates a client handle for the
+ * context plus a sub-handle for its private data. On failure after
+ * handle creation, the context handle is destroyed (which also frees
+ * the context through its release callback); if the context exists but
+ * no handle was created, it is destroyed directly. Always returns 0. */
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+				      IMG_UINT8 * psDevmemIntAcquireRemoteCtxIN_UI8,
+				      IMG_UINT8 * psDevmemIntAcquireRemoteCtxOUT_UI8,
+				      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *)
+	    IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *)
+	    IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0);
+
+	IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+	PMR *psPMRInt = NULL;
+	DEVMEMINT_CTX *psContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* NULL out the returned handle so the error path below can tell
+	 * whether handle creation got far enough to need cleanup. */
+	psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psDevmemIntAcquireRemoteCtxOUT->hContext,
+				      (void *)psContextInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      _DevmemIntAcquireRemoteCtxpsContextIntRelease);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	psDevmemIntAcquireRemoteCtxOUT->eError =
+	    PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					 &psDevmemIntAcquireRemoteCtxOUT->hPrivData,
+					 (void *)hPrivDataInt, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					 PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+					 psDevmemIntAcquireRemoteCtxOUT->hContext);
+	if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntAcquireRemoteCtx_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntAcquireRemoteCtx_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+							     (IMG_HANDLE)
+							     psDevmemIntAcquireRemoteCtxOUT->
+							     hContext,
+							     PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psContextInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		if (psContextInt)
+		{
+			DevmemIntCtxDestroy(psContextInt);
+		}
+	}
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+void DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+/* Populate the services dispatch table with the three CMM bridge entry
+ * points (export / unexport / acquire-remote context). Always returns
+ * PVRSRV_OK. */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX,
+			      PVRSRVBridgeDevmemIntExportCtx, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX,
+			      PVRSRVBridgeDevmemIntUnexportCtx, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX,
+			      PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all cmm functions with services
+ */
+/* Remove the three CMM bridge entry points from the dispatch table;
+ * mirror of InitCMMBridge(). */
+void DeinitCMMBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX);
+
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitCMMBridge() \
+ PVRSRV_OK
+
+#define DeinitCMMBridge()
+
+#endif /* EXCLUDE_CMM_BRIDGE */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR * puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR * puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR * puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR * puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 * pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR * puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 * pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 * pui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 * pui32AllocationIndexOut);
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for devicememhistory
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+#include "pmr.h"
+#include "devicemem_history_server.h"
+
+/* Direct (same-address-space) client stub for DevicememHistoryMap: the
+ * "handle" is the kernel PMR pointer itself, so this just casts and
+ * forwards to DevicememHistoryMapKM(). hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+						    IMG_HANDLE hPMR,
+						    IMG_DEVMEM_SIZE_T uiOffset,
+						    IMG_DEV_VIRTADDR sDevVAddr,
+						    IMG_DEVMEM_SIZE_T uiSize,
+						    const IMG_CHAR * puiText,
+						    IMG_UINT32 ui32Log2PageSize,
+						    IMG_UINT32 ui32AllocationIndex,
+						    IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistoryMapKM(psPMRInt,
+				  uiOffset,
+				  sDevVAddr,
+				  uiSize,
+				  puiText,
+				  ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
+
+/* Direct client stub for DevicememHistoryUnmap: casts the PMR handle and
+ * forwards to DevicememHistoryUnmapKM(). hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+						      IMG_HANDLE hPMR,
+						      IMG_DEVMEM_SIZE_T uiOffset,
+						      IMG_DEV_VIRTADDR sDevVAddr,
+						      IMG_DEVMEM_SIZE_T uiSize,
+						      const IMG_CHAR * puiText,
+						      IMG_UINT32 ui32Log2PageSize,
+						      IMG_UINT32 ui32AllocationIndex,
+						      IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistoryUnmapKM(psPMRInt,
+				    uiOffset,
+				    sDevVAddr,
+				    uiSize,
+				    puiText,
+				    ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
+
+/* Direct client stub for DevicememHistoryMapVRange: here hBridge is
+ * reinterpreted as the PVRSRV_DEVICE_NODE pointer and passed through to
+ * DevicememHistoryMapVRangeKM() (first, connection, argument is NULL). */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+							  IMG_DEV_VIRTADDR sBaseDevVAddr,
+							  IMG_UINT32 ui32ui32StartPage,
+							  IMG_UINT32 ui32NumPages,
+							  IMG_DEVMEM_SIZE_T uiAllocSize,
+							  const IMG_CHAR * puiText,
+							  IMG_UINT32 ui32Log2PageSize,
+							  IMG_UINT32 ui32AllocationIndex,
+							  IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    DevicememHistoryMapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+					sBaseDevVAddr,
+					ui32ui32StartPage,
+					ui32NumPages,
+					uiAllocSize,
+					puiText,
+					ui32Log2PageSize,
+					ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
+
+/* Direct client stub for DevicememHistoryUnmapVRange: hBridge is
+ * reinterpreted as the PVRSRV_DEVICE_NODE pointer and passed through to
+ * DevicememHistoryUnmapVRangeKM() (first, connection, argument is NULL). */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+							    IMG_DEV_VIRTADDR sBaseDevVAddr,
+							    IMG_UINT32 ui32ui32StartPage,
+							    IMG_UINT32 ui32NumPages,
+							    IMG_DEVMEM_SIZE_T uiAllocSize,
+							    const IMG_CHAR * puiText,
+							    IMG_UINT32 ui32Log2PageSize,
+							    IMG_UINT32 ui32AllocationIndex,
+							    IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+
+	eError =
+	    DevicememHistoryUnmapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+					  sBaseDevVAddr,
+					  ui32ui32StartPage,
+					  ui32NumPages,
+					  uiAllocSize,
+					  puiText,
+					  ui32Log2PageSize,
+					  ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
+
+/* Direct client stub for DevicememHistorySparseChange: casts the PMR
+ * handle and forwards the alloc/free page index arrays to
+ * DevicememHistorySparseChangeKM(). hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+							     IMG_HANDLE hPMR,
+							     IMG_DEVMEM_SIZE_T uiOffset,
+							     IMG_DEV_VIRTADDR sDevVAddr,
+							     IMG_DEVMEM_SIZE_T uiSize,
+							     const IMG_CHAR * puiText,
+							     IMG_UINT32 ui32Log2PageSize,
+							     IMG_UINT32 ui32AllocPageCount,
+							     IMG_UINT32 * pui32AllocPageIndices,
+							     IMG_UINT32 ui32FreePageCount,
+							     IMG_UINT32 * pui32FreePageIndices,
+							     IMG_UINT32 ui32AllocationIndex,
+							     IMG_UINT32 * pui32AllocationIndexOut)
+{
+	PVRSRV_ERROR eError;
+	PMR *psPMRInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	psPMRInt = (PMR *) hPMR;
+
+	eError =
+	    DevicememHistorySparseChangeKM(psPMRInt,
+					   uiOffset,
+					   sDevVAddr,
+					   uiSize,
+					   puiText,
+					   ui32Log2PageSize,
+					   ui32AllocPageCount,
+					   pui32AllocPageIndices,
+					   ui32FreePageCount,
+					   pui32FreePageIndices,
+					   ui32AllocationIndex, pui32AllocationIndexOut);
+
+	return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/* NOTE(review): img_types.h and img_defs.h are included a second time here.
+ * Harmless (both are include-guarded) but the duplicates could be dropped
+ * the next time this generated header is regenerated. */
+#include "img_types.h"
+#include "img_defs.h"
+#include "devicemem_typedefs.h"
+
+/* Bridge command numbering for the devicememhistory module: five commands
+ * numbered consecutively from CMD_FIRST. CMD_LAST must always equal the
+ * highest command offset (currently +4) for dispatch-table registration. */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4)
+
+/*******************************************
+ DevicememHistoryMap
+ *******************************************/
+
+/* All IN/OUT structures below are declared __packed so there is no
+ * compiler-inserted padding and the user- and kernel-side views of the
+ * layout match byte for byte. Pointer members (puiText, and the
+ * pui32*PageIndices arrays in SparseChange) carry user-space addresses;
+ * the server side copies the data in with OSCopyFromUser before use. */
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ const IMG_CHAR *puiText;
+ IMG_UINT32 ui32AllocationIndex;
+ IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+/*******************************************
+ DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap (same shape as Map) */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ const IMG_CHAR *puiText;
+ IMG_UINT32 ui32AllocationIndex;
+ IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+/*******************************************
+ DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR *puiText;
+ IMG_UINT32 ui32AllocationIndex;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 ui32ui32StartPage;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+/*******************************************
+ DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange (same shape as MapVRange) */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR *puiText;
+ IMG_UINT32 ui32AllocationIndex;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 ui32ui32StartPage;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/*******************************************
+ DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange.
+ * pui32AllocPageIndices / pui32FreePageIndices are user-space arrays of
+ * ui32AllocPageCount / ui32FreePageCount elements respectively; both counts
+ * are range-checked against PMR_MAX_SUPPORTED_4K_PAGE_COUNT server-side. */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 *pui32AllocPageIndices;
+ IMG_UINT32 *pui32FreePageIndices;
+ const IMG_CHAR *puiText;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 ui32AllocationIndex;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 ui32Log2PageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32AllocationIndexOut;
+} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_history_server.h"
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side entry point for the DevicememHistoryMap bridge command.
+ * Unpacks the packed IN structure, copies the annotation text in from user
+ * space, resolves the PMR handle under the connection's handle lock, and
+ * calls DevicememHistoryMapKM. Always returns 0; the per-call status is
+ * reported to the caller via psDevicememHistoryMapOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevicememHistoryMapIN_UI8,
+ IMG_UINT8 * psDevicememHistoryMapOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN =
+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT =
+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8,
+ 0);
+
+ IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total staging size computed in 64 bits so the sum cannot wrap before
+ * the IMG_UINT32_MAX range check below. */
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevicememHistoryMap_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer too small to reuse: fall back to a heap allocation
+ * (freed at exit when bHaveEnoughSpace is false). */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMap_exit;
+ }
+ }
+ }
+
+ {
+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over. The full fixed-size annotation buffer is always
+ * copied, regardless of the actual string length. */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapIN->puiText,
+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMap_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation. */
+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevicememHistoryMapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevicememHistoryMap_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevicememHistoryMapOUT->eError =
+ DevicememHistoryMapKM(psPMRInt,
+ psDevicememHistoryMapIN->uiOffset,
+ psDevicememHistoryMapIN->sDevVAddr,
+ psDevicememHistoryMapIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryMapIN->ui32Log2PageSize,
+ psDevicememHistoryMapIN->ui32AllocationIndex,
+ &psDevicememHistoryMapOUT->ui32AllocationIndexOut);
+
+DevicememHistoryMap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevicememHistoryMapOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to the heap allocation above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side entry point for the DevicememHistoryUnmap bridge command.
+ * Structurally identical to PVRSRVBridgeDevicememHistoryMap: copy the
+ * annotation in from user space, resolve the PMR handle, then call
+ * DevicememHistoryUnmapKM. Always returns 0; the call status is carried
+ * in psDevicememHistoryUnmapOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevicememHistoryUnmapIN_UI8,
+ IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN =
+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *)
+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT =
+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *)
+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size so the sum cannot wrap before the range check below. */
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevicememHistoryUnmap_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Heap fallback; freed at exit when bHaveEnoughSpace is false. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmap_exit;
+ }
+ }
+ }
+
+ {
+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapIN->puiText,
+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmap_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation. */
+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevicememHistoryUnmapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevicememHistoryUnmap_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevicememHistoryUnmapOUT->eError =
+ DevicememHistoryUnmapKM(psPMRInt,
+ psDevicememHistoryUnmapIN->uiOffset,
+ psDevicememHistoryUnmapIN->sDevVAddr,
+ psDevicememHistoryUnmapIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryUnmapIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut);
+
+DevicememHistoryUnmap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevicememHistoryUnmapOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side entry point for the DevicememHistoryMapVRange bridge command.
+ * Unlike the Map/Unmap handlers above there is no PMR handle to resolve:
+ * the annotation is copied in and DevicememHistoryMapVRangeKM is called
+ * with the connection and its device node (OSGetDevNode). Always returns
+ * 0; status is carried in psDevicememHistoryMapVRangeOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevicememHistoryMapVRangeIN_UI8,
+ IMG_UINT8 * psDevicememHistoryMapVRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0);
+
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size so the sum cannot wrap before the range check below. */
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevicememHistoryMapVRange_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Heap fallback; freed at exit when bHaveEnoughSpace is false. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMapVRange_exit;
+ }
+ }
+ }
+
+ {
+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapVRangeIN->puiText,
+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMapVRange_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation. */
+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ psDevicememHistoryMapVRangeOUT->eError =
+ DevicememHistoryMapVRangeKM(psConnection, OSGetDevNode(psConnection),
+ psDevicememHistoryMapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryMapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryMapVRangeIN->ui32NumPages,
+ psDevicememHistoryMapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryMapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryMapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut);
+
+DevicememHistoryMapVRange_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevicememHistoryMapVRangeOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side entry point for the DevicememHistoryUnmapVRange bridge
+ * command. Mirror of PVRSRVBridgeDevicememHistoryMapVRange: no handle
+ * lookup, annotation copied in, then DevicememHistoryUnmapVRangeKM is
+ * called with the connection and its device node. Always returns 0;
+ * status is carried in psDevicememHistoryUnmapVRangeOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevicememHistoryUnmapVRangeIN_UI8,
+ IMG_UINT8 * psDevicememHistoryUnmapVRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0);
+
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size so the sum cannot wrap before the range check below. */
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psDevicememHistoryUnmapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Heap fallback; freed at exit when bHaveEnoughSpace is false. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ }
+ }
+
+ {
+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapVRangeIN->puiText,
+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation. */
+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ psDevicememHistoryUnmapVRangeOUT->eError =
+ DevicememHistoryUnmapVRangeKM(psConnection, OSGetDevNode(psConnection),
+ psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryUnmapVRangeIN->ui32NumPages,
+ psDevicememHistoryUnmapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapVRangeOUT->
+ ui32AllocationIndexOut);
+
+DevicememHistoryUnmapVRange_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevicememHistoryUnmapVRangeOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+/* NOTE(review): the PMR_MAX_SUPPORTED_4K_PAGE_COUNT assertion appears twice
+ * below — presumably once per page-index array in the SparseChange command.
+ * Redundant but harmless; could be deduplicated on regeneration. */
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevicememHistorySparseChangeIN_UI8,
+ IMG_UINT8 * psDevicememHistorySparseChangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN =
+ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *)
+ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32AllocPageCount *
+ sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32FreePageCount *
+ sizeof(IMG_UINT32)) + 0;
+
+ if (unlikely
+ (psDevicememHistorySparseChangeIN->ui32AllocPageCount >
+ PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DevicememHistorySparseChange_exit;
+ }
+
+ if (unlikely
+ (psDevicememHistorySparseChangeIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DevicememHistorySparseChange_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevicememHistorySparseChange_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psDevicememHistorySparseChangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevicememHistorySparseChangeOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ }
+
+ {
+ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextInt,
+ (const void __user *)psDevicememHistorySparseChangeIN->puiText,
+ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32AllocPageIndicesInt,
+ (const void __user *)psDevicememHistorySparseChangeIN->pui32AllocPageIndices,
+ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) !=
+ PVRSRV_OK)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32FreePageIndicesInt,
+ (const void __user *)psDevicememHistorySparseChangeIN->pui32FreePageIndices,
+ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) !=
+ PVRSRV_OK)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevicememHistorySparseChangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevicememHistorySparseChange_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevicememHistorySparseChangeOUT->eError =
+ DevicememHistorySparseChangeKM(psPMRInt,
+ psDevicememHistorySparseChangeIN->uiOffset,
+ psDevicememHistorySparseChangeIN->sDevVAddr,
+ psDevicememHistorySparseChangeIN->uiSize,
+ uiTextInt,
+ psDevicememHistorySparseChangeIN->ui32Log2PageSize,
+ psDevicememHistorySparseChangeIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32AllocationIndex,
+ &psDevicememHistorySparseChangeOUT->
+ ui32AllocationIndexOut);
+
+DevicememHistorySparseChange_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevicememHistorySparseChangeOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+/* Single lock handed to every DEVICEMEMHISTORY dispatch entry below so the
+ * bridge dispatcher serialises calls into this module. Created by
+ * InitDEVICEMEMHISTORYBridge(), destroyed by DeinitDEVICEMEMHISTORYBridge().
+ */
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+void DeinitDEVICEMEMHISTORYBridge(void);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services.
+ * Returns the OSLockCreate() error if the shared bridge lock cannot be
+ * created; otherwise registers the five entry points and returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP,
+ PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP,
+ PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE,
+ PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE,
+ PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE,
+ PVRSRVBridgeDevicememHistorySparseChange,
+ pDEVICEMEMHISTORYBridgeLock);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all devicememhistory functions with services.
+ * NOTE(review): the bridge lock is destroyed before the dispatch entries are
+ * unset — this assumes no bridge call can still be in flight at deinit time;
+ * confirm against the bridge dispatcher's shutdown ordering.
+ */
+void DeinitDEVICEMEMHISTORYBridge(void)
+{
+ OSLockDestroy(pDEVICEMEMHISTORYBridgeLock);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY,
+ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for di
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for di
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DI_BRIDGE_H
+#define COMMON_DI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "pvr_dicommon.h"
+
+/* Command numbers for the DI (Debug Info) bridge. The __packed structures
+ * below form the user/kernel ABI for these commands, so their field layout
+ * must not change. */
+#define PVRSRV_BRIDGE_DI_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DI_DICREATECONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DI_DIREADENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DI_DIWRITEENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DI_DILISTALLENTRIES PVRSRV_BRIDGE_DI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DI_CMD_LAST (PVRSRV_BRIDGE_DI_CMD_FIRST+4)
+
+/*******************************************
+ DICreateContext
+ *******************************************/
+
+/* Bridge in structure for DICreateContext */
+typedef struct PVRSRV_BRIDGE_IN_DICREATECONTEXT_TAG
+{
+ /* User buffer the server writes the created stream's name into. */
+ IMG_CHAR *puiStreamName;
+} __packed PVRSRV_BRIDGE_IN_DICREATECONTEXT;
+
+/* Bridge out structure for DICreateContext */
+typedef struct PVRSRV_BRIDGE_OUT_DICREATECONTEXT_TAG
+{
+ /* Handle to the newly created DI context. */
+ IMG_HANDLE hContext;
+ /* Echo of the IN stream-name user pointer. */
+ IMG_CHAR *puiStreamName;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DICREATECONTEXT;
+
+/*******************************************
+ DIDestroyContext
+ *******************************************/
+
+/* Bridge in structure for DIDestroyContext */
+typedef struct PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT_TAG
+{
+ IMG_HANDLE hContext;
+} __packed PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT;
+
+/* Bridge out structure for DIDestroyContext */
+typedef struct PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT;
+
+/*******************************************
+ DIReadEntry
+ *******************************************/
+
+/* Bridge in structure for DIReadEntry */
+typedef struct PVRSRV_BRIDGE_IN_DIREADENTRY_TAG
+{
+ /* Byte offset and size of the read within the entry. */
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_HANDLE hContext;
+ /* User buffer holding the NUL-terminated entry path (DI_IMPL_BRG_PATH_LEN). */
+ const IMG_CHAR *puiEntryPath;
+} __packed PVRSRV_BRIDGE_IN_DIREADENTRY;
+
+/* Bridge out structure for DIReadEntry */
+typedef struct PVRSRV_BRIDGE_OUT_DIREADENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIREADENTRY;
+
+/*******************************************
+ DIWriteEntry
+ *******************************************/
+
+/* Bridge in structure for DIWriteEntry */
+typedef struct PVRSRV_BRIDGE_IN_DIWRITEENTRY_TAG
+{
+ IMG_HANDLE hContext;
+ /* User buffer holding the entry path (DI_IMPL_BRG_PATH_LEN bytes). */
+ const IMG_CHAR *puiEntryPath;
+ /* User buffer holding the value to write; ui32ValueSize bytes long. */
+ const IMG_CHAR *puiValue;
+ IMG_UINT32 ui32ValueSize;
+} __packed PVRSRV_BRIDGE_IN_DIWRITEENTRY;
+
+/* Bridge out structure for DIWriteEntry */
+typedef struct PVRSRV_BRIDGE_OUT_DIWRITEENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DIWRITEENTRY;
+
+/*******************************************
+ DIListAllEntries
+ *******************************************/
+
+/* Bridge in structure for DIListAllEntries */
+typedef struct PVRSRV_BRIDGE_IN_DILISTALLENTRIES_TAG
+{
+ IMG_HANDLE hContext;
+} __packed PVRSRV_BRIDGE_IN_DILISTALLENTRIES;
+
+/* Bridge out structure for DIListAllEntries */
+typedef struct PVRSRV_BRIDGE_OUT_DILISTALLENTRIES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DILISTALLENTRIES;
+
+#endif /* COMMON_DI_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for di
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for di
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "di_impl_brg.h"
+
+#include "common_di_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback registered by PVRSRVBridgeDICreateContext():
+ * destroys the DI context when its bridge handle is freed. */
+static PVRSRV_ERROR _DICreateContextpsContextIntRelease(void *pvData)
+{
+ return DIDestroyContextKM((DI_CONTEXT *) pvData);
+}
+
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge entry for DICreateContext.
+ * Allocates a kernel-side stream-name buffer, creates a DI context via
+ * DICreateContextKM(), wraps it in a connection handle, then copies the
+ * stream name back to the caller-supplied user buffer.
+ * Always returns 0; the PVRSRV_ERROR status is carried in
+ * psDICreateContextOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDICreateContextIN_UI8,
+ IMG_UINT8 * psDICreateContextOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DICREATECONTEXT *psDICreateContextIN =
+ (PVRSRV_BRIDGE_IN_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DICREATECONTEXT *psDICreateContextOUT =
+ (PVRSRV_BRIDGE_OUT_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextOUT_UI8, 0);
+
+ IMG_CHAR *puiStreamNameInt = NULL;
+ DI_CONTEXT *psContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
+
+ /* Echo the user pointer so the copy-out below can reach the caller's
+ * buffer. (The previous PVR_UNREFERENCED_PARAMETER(psDICreateContextIN)
+ * marker was removed: the IN structure is clearly referenced here and in
+ * the buffer-reuse path below, so the marker was contradictory.) */
+ psDICreateContextOUT->puiStreamName = psDICreateContextIN->puiStreamName;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDICreateContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DICreateContext_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDICreateContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDICreateContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DICreateContext_exit;
+ }
+ }
+ }
+
+ /* Carve the stream-name output buffer out of the scratch area. */
+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+ {
+ puiStreamNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ psDICreateContextOUT->eError = DICreateContextKM(puiStreamNameInt, &psContextInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK))
+ {
+ goto DICreateContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psDICreateContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psDICreateContextOUT->hContext,
+ (void *)psContextInt,
+ PVRSRV_HANDLE_TYPE_DI_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ (PFN_HANDLE_RELEASE) &
+ _DICreateContextpsContextIntRelease);
+ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DICreateContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((puiStreamNameInt) && ((PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psDICreateContextOUT->puiStreamName, puiStreamNameInt,
+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))) != PVRSRV_OK))
+ {
+ psDICreateContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DICreateContext_exit;
+ }
+ }
+
+DICreateContext_exit:
+
+ if (psDICreateContextOUT->eError != PVRSRV_OK)
+ {
+ /* NOTE(review): if the copy-to-user above fails after the handle was
+ * allocated, this destroys the context directly while the handle
+ * (with its release callback) still exists in the handle base —
+ * confirm the handle base cannot release it a second time. */
+ if (psContextInt)
+ {
+ DIDestroyContextKM(psContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDICreateContextOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge entry for DIDestroyContext.
+ * Destroys the DI context handle; the actual context teardown runs via the
+ * handle's release callback. KERNEL_CCB_FULL and RETRY are treated as
+ * retryable and are not logged as errors. Always returns 0; the status is
+ * carried in psDIDestroyContextOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDIDestroyContext(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDIDestroyContextIN_UI8,
+ IMG_UINT8 * psDIDestroyContextOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *psDIDestroyContextIN =
+ (PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *psDIDestroyContextOUT =
+ (PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psDIDestroyContextOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDIDestroyContextIN->hContext,
+ PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+ if (unlikely((psDIDestroyContextOUT->eError != PVRSRV_OK) &&
+ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(psDIDestroyContextOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto DIDestroyContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+DIDestroyContext_exit:
+
+ return 0;
+}
+
+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX,
+ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge entry for DIReadEntry.
+ * Copies the fixed-size entry path from user space (force-NUL-terminated),
+ * looks up the DI context handle, and forwards the read request to
+ * DIReadEntryKM(). Always returns 0; the status is carried in
+ * psDIReadEntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDIReadEntryIN_UI8,
+ IMG_UINT8 * psDIReadEntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DIREADENTRY *psDIReadEntryIN =
+ (PVRSRV_BRIDGE_IN_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DIREADENTRY *psDIReadEntryOUT =
+ (PVRSRV_BRIDGE_OUT_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryOUT_UI8, 0);
+
+ IMG_HANDLE hContext = psDIReadEntryIN->hContext;
+ DI_CONTEXT *psContextInt = NULL;
+ IMG_CHAR *uiEntryPathInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0;
+
+ /* Computed in 64-bit to avoid overflow before the 32-bit narrowing. */
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDIReadEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DIReadEntry_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIReadEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDIReadEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DIReadEntry_exit;
+ }
+ }
+ }
+
+ /* Carve the entry-path buffer out of the scratch area. */
+ {
+ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiEntryPathInt, (const void __user *)psDIReadEntryIN->puiEntryPath,
+ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDIReadEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DIReadEntry_exit;
+ }
+ /* Force NUL-termination so the path cannot overrun in the server. */
+ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDIReadEntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psContextInt,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+ if (unlikely(psDIReadEntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DIReadEntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDIReadEntryOUT->eError =
+ DIReadEntryKM(psContextInt,
+ uiEntryPathInt, psDIReadEntryIN->ui64Offset, psDIReadEntryIN->ui64Size);
+
+DIReadEntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDIReadEntryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* DI_IMPL_BRG_PATH_LEN bounds both the entry-path and the value arrays, so
+ * one assertion covers both (the generator emitted it twice; the byte-for-byte
+ * duplicate has been removed). */
+static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX,
+ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge entry for DIWriteEntry.
+ * Copies the fixed-size entry path and a caller-sized value from user space
+ * (both force-NUL-terminated), looks up the DI context handle, and forwards
+ * the write to DIWriteEntryKM(). Rejects ui32ValueSize larger than
+ * DI_IMPL_BRG_PATH_LEN. Always returns 0; the status is carried in
+ * psDIWriteEntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDIWriteEntryIN_UI8,
+ IMG_UINT8 * psDIWriteEntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DIWRITEENTRY *psDIWriteEntryIN =
+ (PVRSRV_BRIDGE_IN_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DIWRITEENTRY *psDIWriteEntryOUT =
+ (PVRSRV_BRIDGE_OUT_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryOUT_UI8, 0);
+
+ IMG_HANDLE hContext = psDIWriteEntryIN->hContext;
+ DI_CONTEXT *psContextInt = NULL;
+ IMG_CHAR *uiEntryPathInt = NULL;
+ IMG_CHAR *uiValueInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Bound the untrusted value size before any allocation. */
+ if (unlikely(psDIWriteEntryIN->ui32ValueSize > DI_IMPL_BRG_PATH_LEN))
+ {
+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DIWriteEntry_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DIWriteEntry_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIWriteEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DIWriteEntry_exit;
+ }
+ }
+ }
+
+ /* Carve the entry-path buffer out of the scratch area. */
+ {
+ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiEntryPathInt, (const void __user *)psDIWriteEntryIN->puiEntryPath,
+ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DIWriteEntry_exit;
+ }
+ /* Force NUL-termination of the untrusted path. */
+ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+ if (psDIWriteEntryIN->ui32ValueSize != 0)
+ {
+ uiValueInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiValueInt, (const void __user *)psDIWriteEntryIN->puiValue,
+ psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DIWriteEntry_exit;
+ }
+ /* Force NUL-termination of the untrusted value. */
+ ((IMG_CHAR *) uiValueInt)[(psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDIWriteEntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psContextInt,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+ if (unlikely(psDIWriteEntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DIWriteEntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDIWriteEntryOUT->eError =
+ DIWriteEntryKM(psContextInt,
+ uiEntryPathInt, psDIWriteEntryIN->ui32ValueSize, uiValueInt);
+
+DIWriteEntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDIWriteEntryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge entry for DIListAllEntries.
+ * Looks up the DI context handle and forwards to DIListAllEntriesKM().
+ * Always returns 0; the status is carried in psDIListAllEntriesOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDIListAllEntries(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDIListAllEntriesIN_UI8,
+ IMG_UINT8 * psDIListAllEntriesOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DILISTALLENTRIES *psDIListAllEntriesIN =
+ (PVRSRV_BRIDGE_IN_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *psDIListAllEntriesOUT =
+ (PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesOUT_UI8, 0);
+
+ IMG_HANDLE hContext = psDIListAllEntriesIN->hContext;
+ DI_CONTEXT *psContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDIListAllEntriesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psContextInt,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE);
+ if (unlikely(psDIListAllEntriesOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DIListAllEntries_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDIListAllEntriesOUT->eError = DIListAllEntriesKM(psContextInt);
+
+DIListAllEntries_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDIBridge(void);
+void DeinitDIBridge(void);
+
+/*
+ * Register all DI functions with services.
+ * Unlike DEVICEMEMHISTORY, no per-bridge lock is used here (NULL lock):
+ * the DI entry points rely on the handle-base lock for serialisation.
+ */
+PVRSRV_ERROR InitDIBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT,
+ PVRSRVBridgeDICreateContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT,
+ PVRSRVBridgeDIDestroyContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY,
+ PVRSRVBridgeDIReadEntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY,
+ PVRSRVBridgeDIWriteEntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES,
+ PVRSRVBridgeDIListAllEntries, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all di functions with services
+ */
+void DeinitDIBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for dma
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for dma
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMA_BRIDGE_H
+#define COMMON_DMA_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_DMA_CMD_FIRST 0 /* command indices; expansions parenthesised so they expand safely inside expressions */
+#define PVRSRV_BRIDGE_DMA_DMATRANSFER (PVRSRV_BRIDGE_DMA_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE (PVRSRV_BRIDGE_DMA_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS (PVRSRV_BRIDGE_DMA_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_DMA_CMD_LAST (PVRSRV_BRIDGE_DMA_CMD_FIRST+2)
+
+/*******************************************
+ DmaTransfer
+ *******************************************/
+
+/* Bridge in structure for DmaTransfer */
+typedef struct PVRSRV_BRIDGE_IN_DMATRANSFER_TAG
+{
+ IMG_UINT64 *pui64Address; /* user array of ui32NumDMAs 64-bit addresses */
+ IMG_DEVMEM_OFFSET_T *puiOffset; /* user array of ui32NumDMAs PMR offsets */
+ IMG_DEVMEM_SIZE_T *puiSize; /* user array of ui32NumDMAs transfer sizes */
+ IMG_HANDLE *phPMR; /* user array of ui32NumDMAs PMR handles */
+ PVRSRV_TIMELINE hUpdateTimeline; /* timeline handle passed through to DmaTransfer */
+ IMG_UINT32 ui32NumDMAs; /* element count for the arrays above; server rejects values > MAX_DMA_OPS */
+ IMG_UINT32 ui32uiFlags; /* flags forwarded verbatim to DmaTransfer */
+} __packed PVRSRV_BRIDGE_IN_DMATRANSFER;
+
+/* Bridge out structure for DmaTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_DMATRANSFER_TAG
+{
+ PVRSRV_ERROR eError; /* bridge call status */
+} __packed PVRSRV_BRIDGE_OUT_DMATRANSFER;
+
+/*******************************************
+ DmaSparseMappingTable
+ *******************************************/
+
+/* Bridge in structure for DmaSparseMappingTable */
+typedef struct PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset; /* offset passed through to DmaSparseMappingTable */
+ IMG_HANDLE hPMR; /* PMR handle to query */
+ IMG_BOOL *pbTable; /* user buffer that receives ui32SizeInPages IMG_BOOL entries */
+ IMG_UINT32 ui32SizeInPages; /* table length; server rejects values > 32 */
+} __packed PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE;
+
+/* Bridge out structure for DmaSparseMappingTable */
+typedef struct PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE_TAG
+{
+ IMG_BOOL *pbTable; /* echoed copy of the user output pointer the server copies the table to */
+ PVRSRV_ERROR eError; /* bridge call status */
+} __packed PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE;
+
+/*******************************************
+ DmaDeviceParams
+ *******************************************/
+
+/* Bridge in structure for DmaDeviceParams */
+typedef struct PVRSRV_BRIDGE_IN_DMADEVICEPARAMS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder; /* no inputs; placeholder keeps the struct non-empty */
+} __packed PVRSRV_BRIDGE_IN_DMADEVICEPARAMS;
+
+/* Bridge out structure for DmaDeviceParams */
+typedef struct PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS_TAG
+{
+ PVRSRV_ERROR eError; /* bridge call status */
+ IMG_UINT32 ui32DmaBuffAlign; /* filled in by DmaDeviceParams */
+ IMG_UINT32 ui32DmaTransferMult; /* filled in by DmaDeviceParams */
+} __packed PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS;
+
+#endif /* COMMON_DMA_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for dma
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for dma
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "dma_km.h"
+
+#include "common_dma_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(MAX_DMA_OPS <= IMG_UINT32_MAX, "MAX_DMA_OPS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeDmaTransfer(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDmaTransferIN_UI8,
+ IMG_UINT8 * psDmaTransferOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DMATRANSFER *psDmaTransferIN =
+ (PVRSRV_BRIDGE_IN_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DMATRANSFER *psDmaTransferOUT =
+ (PVRSRV_BRIDGE_OUT_DMATRANSFER *) IMG_OFFSET_ADDR(psDmaTransferOUT_UI8, 0);
+
+ PMR **psPMRInt = NULL; /* kernel PMR pointers resolved from the user handles */
+ IMG_HANDLE *hPMRInt2 = NULL; /* raw handle values copied in from user space */
+ IMG_UINT64 *ui64AddressInt = NULL;
+ IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+ IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(PMR *)) +
+ ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) +
+ ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) +
+ ((IMG_UINT64) psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) + 0;
+
+ if (unlikely(psDmaTransferIN->ui32NumDMAs > MAX_DMA_OPS)) /* bounds-check the user count before sizing buffers */
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DmaTransfer_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX) /* total must fit the 32-bit allocation size */
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DmaTransfer_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDmaTransferIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaTransferIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DmaTransfer_exit;
+ }
+ }
+ }
+
+ if (psDmaTransferIN->ui32NumDMAs != 0)
+ {
+ psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psPMRInt, 0, psDmaTransferIN->ui32NumDMAs * sizeof(PMR *)); /* zeroed so cleanup can tell which lookups succeeded */
+ ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(PMR *);
+ hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the PMR handle array over from user space */
+ if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hPMRInt2, (const void __user *)psDmaTransferIN->phPMR,
+ psDmaTransferIN->ui32NumDMAs * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DmaTransfer_exit;
+ }
+ }
+ if (psDmaTransferIN->ui32NumDMAs != 0)
+ {
+ ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64);
+ }
+
+ /* Copy the 64-bit address array over from user space */
+ if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui64AddressInt, (const void __user *)psDmaTransferIN->pui64Address,
+ psDmaTransferIN->ui32NumDMAs * sizeof(IMG_UINT64)) != PVRSRV_OK)
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DmaTransfer_exit;
+ }
+ }
+ if (psDmaTransferIN->ui32NumDMAs != 0)
+ {
+ uiOffsetInt =
+ (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T);
+ }
+
+ /* Copy the PMR offset array over from user space */
+ if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiOffsetInt, (const void __user *)psDmaTransferIN->puiOffset,
+ psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK)
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DmaTransfer_exit;
+ }
+ }
+ if (psDmaTransferIN->ui32NumDMAs != 0)
+ {
+ uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T);
+ }
+
+ /* Copy the transfer size array over from user space */
+ if (psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiSizeInt, (const void __user *)psDmaTransferIN->puiSize,
+ psDmaTransferIN->ui32NumDMAs * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK)
+ {
+ psDmaTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DmaTransfer_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++)
+ {
+ /* Resolve each user PMR handle to its kernel PMR pointer */
+ psDmaTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt[i],
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDmaTransferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DmaTransfer_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDmaTransferOUT->eError =
+ DmaTransfer(psConnection, OSGetDevNode(psConnection),
+ psDmaTransferIN->ui32NumDMAs,
+ psPMRInt,
+ ui64AddressInt,
+ uiOffsetInt,
+ uiSizeInt, psDmaTransferIN->ui32uiFlags, psDmaTransferIN->hUpdateTimeline);
+
+DmaTransfer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ if (hPMRInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psDmaTransferIN->ui32NumDMAs; i++)
+ {
+
+ /* Unreference the previously looked up handle (NULL entries were never resolved) */
+ if (psPMRInt && psPMRInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDmaTransferOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer) /* only free heap allocations, not the in-buffer tail */
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(32 <= IMG_UINT32_MAX, "32 must not be larger than IMG_UINT32_MAX"); /* generated guard for the array bound below */
+
+static IMG_INT
+PVRSRVBridgeDmaSparseMappingTable(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDmaSparseMappingTableIN_UI8,
+ IMG_UINT8 * psDmaSparseMappingTableOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableIN =
+ (PVRSRV_BRIDGE_IN_DMASPARSEMAPPINGTABLE *)
+ IMG_OFFSET_ADDR(psDmaSparseMappingTableIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *psDmaSparseMappingTableOUT =
+ (PVRSRV_BRIDGE_OUT_DMASPARSEMAPPINGTABLE *)
+ IMG_OFFSET_ADDR(psDmaSparseMappingTableOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psDmaSparseMappingTableIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_BOOL *pbTableInt = NULL; /* kernel-side table filled by DmaSparseMappingTable */
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) + 0;
+
+ if (psDmaSparseMappingTableIN->ui32SizeInPages > 32) /* generated upper bound on the table size */
+ {
+ psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DmaSparseMappingTable_exit;
+ }
+
+ psDmaSparseMappingTableOUT->pbTable = psDmaSparseMappingTableIN->pbTable; /* echo user pointer for the copy-to-user below */
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DmaSparseMappingTable_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDmaSparseMappingTableIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDmaSparseMappingTableIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DmaSparseMappingTable_exit;
+ }
+ }
+ }
+
+ if (psDmaSparseMappingTableIN->ui32SizeInPages != 0)
+ {
+ pbTableInt = (IMG_BOOL *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL);
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Resolve the user PMR handle to a kernel PMR pointer */
+ psDmaSparseMappingTableOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDmaSparseMappingTableOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DmaSparseMappingTable_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDmaSparseMappingTableOUT->eError =
+ DmaSparseMappingTable(psPMRInt,
+ psDmaSparseMappingTableIN->uiOffset,
+ psDmaSparseMappingTableIN->ui32SizeInPages, pbTableInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psDmaSparseMappingTableOUT->eError != PVRSRV_OK))
+ {
+ goto DmaSparseMappingTable_exit;
+ }
+
+ /* Copy the table back out to the caller's buffer */
+ if ((pbTableInt) && ((psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psDmaSparseMappingTableOUT->pbTable, pbTableInt,
+ (psDmaSparseMappingTableIN->ui32SizeInPages * sizeof(IMG_BOOL))) !=
+ PVRSRV_OK))
+ {
+ psDmaSparseMappingTableOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DmaSparseMappingTable_exit;
+ }
+ }
+
+DmaSparseMappingTable_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDmaSparseMappingTableOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer) /* only free heap allocations, not the in-buffer tail */
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeDmaDeviceParams(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDmaDeviceParamsIN_UI8,
+ IMG_UINT8 * psDmaDeviceParamsOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *psDmaDeviceParamsIN =
+ (PVRSRV_BRIDGE_IN_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *psDmaDeviceParamsOUT =
+ (PVRSRV_BRIDGE_OUT_DMADEVICEPARAMS *) IMG_OFFSET_ADDR(psDmaDeviceParamsOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psDmaDeviceParamsIN); /* input struct is an empty placeholder */
+
+ psDmaDeviceParamsOUT->eError =
+ DmaDeviceParams(psConnection, OSGetDevNode(psConnection),
+ &psDmaDeviceParamsOUT->ui32DmaBuffAlign,
+ &psDmaDeviceParamsOUT->ui32DmaTransferMult);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDMABridge(void);
+void DeinitDMABridge(void);
+
+/*
+ * Register all DMA functions with services
+ */
+PVRSRV_ERROR InitDMABridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER,
+ PVRSRVBridgeDmaTransfer, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE,
+ PVRSRVBridgeDmaSparseMappingTable, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS,
+ PVRSRVBridgeDmaDeviceParams, NULL);
+
+ return PVRSRV_OK; /* no failure paths: registration here cannot fail */
+}
+
+/*
+ * Unregister all dma functions with services
+ */
+void DeinitDMABridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMATRANSFER); /* mirrors the entries registered in InitDMABridge */
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMASPARSEMAPPINGTABLE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMA, PVRSRV_BRIDGE_DMA_DMADEVICEPARAMS);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 /* command indices; expansions parenthesised so they expand safely inside expressions */
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0)
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1)
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3)
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3)
+
+/*******************************************
+ PhysmemImportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+ const IMG_CHAR *puiName; /* user pointer to annotation string, ui32NameSize bytes (server forces final NUL) */
+ IMG_INT ifd; /* file descriptor passed to PhysmemImportDmaBuf */
+ IMG_UINT32 ui32NameSize; /* length of puiName; server rejects values > DEVMEM_ANNOTATION_MAX_LEN */
+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /* allocation flags forwarded to PhysmemImportDmaBuf */
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+ IMG_DEVMEM_ALIGN_T uiAlign; /* alignment reported by PhysmemImportDmaBuf */
+ IMG_DEVMEM_SIZE_T uiSize; /* size reported by PhysmemImportDmaBuf */
+ IMG_HANDLE hPMRPtr; /* handle allocated for the imported PMR */
+ PVRSRV_ERROR eError; /* bridge call status */
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+/*******************************************
+ PhysmemImportDmaBufLocked
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBufLocked */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG
+{
+ const IMG_CHAR *puiName; /* user pointer to annotation string, ui32NameSize bytes */
+ IMG_INT ifd; /* file descriptor for the locked import */
+ IMG_UINT32 ui32NameSize; /* length of puiName in bytes */
+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /* allocation flags for the import */
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED;
+
+/* Bridge out structure for PhysmemImportDmaBufLocked */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG
+{
+ IMG_DEVMEM_ALIGN_T uiAlign; /* alignment reported by the locked import */
+ IMG_DEVMEM_SIZE_T uiSize; /* size reported by the locked import */
+ IMG_HANDLE hPMRPtr; /* handle allocated for the imported PMR; release path calls PMRUnrefUnlockPMR */
+ PVRSRV_ERROR eError; /* bridge call status */
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED;
+
+/*******************************************
+ PhysmemExportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+ IMG_HANDLE hPMR; /* handle of the PMR to export */
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+ PVRSRV_ERROR eError; /* bridge call status */
+ IMG_INT iFd; /* file descriptor produced by the export */
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+/*******************************************
+ PhysmemImportSparseDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_DEVMEM_SIZE_T uiChunkSize; /* size of each chunk */
+ IMG_UINT32 *pui32MappingTable; /* user array of mapping entries; length convention not visible here — confirm against the server marshalling */
+ const IMG_CHAR *puiName; /* user pointer to annotation string */
+ IMG_INT ifd; /* file descriptor to import */
+ IMG_UINT32 ui32NameSize; /* length of puiName in bytes */
+ IMG_UINT32 ui32NumPhysChunks; /* physical chunk count */
+ IMG_UINT32 ui32NumVirtChunks; /* virtual chunk count */
+ PVRSRV_MEMALLOCFLAGS_T uiFlags; /* allocation flags */
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_DEVMEM_ALIGN_T uiAlign; /* alignment reported by the sparse import */
+ IMG_DEVMEM_SIZE_T uiSize; /* size reported by the sparse import */
+ IMG_HANDLE hPMRPtr; /* handle allocated for the imported PMR */
+ PVRSRV_ERROR eError; /* bridge call status */
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData) /* handle-release callback: drops the import's PMR reference */
+{
+ PVRSRV_ERROR eError;
+ eError = PMRUnrefPMR((PMR *) pvData);
+ return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPhysmemImportDmaBufIN_UI8,
+ IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN =
+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT =
+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8,
+ 0);
+
+ IMG_CHAR *uiNameInt = NULL; /* kernel copy of the annotation string */
+ PMR *psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) /* bounds-check before sizing the buffer */
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportDmaBuf_exit;
+ }
+ }
+ }
+
+ if (psPhysmemImportDmaBufIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the annotation name over from user space */
+ if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufIN->puiName,
+ psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportDmaBuf_exit;
+ }
+ ((IMG_CHAR *) uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR))
+ - 1] = '\0'; /* guarantee NUL termination of the user string */
+ }
+
+ psPhysmemImportDmaBufOUT->eError =
+ PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection),
+ psPhysmemImportDmaBufIN->ifd,
+ psPhysmemImportDmaBufIN->uiFlags,
+ psPhysmemImportDmaBufIN->ui32NameSize,
+ uiNameInt,
+ &psPMRPtrInt,
+ &psPhysmemImportDmaBufOUT->uiSize,
+ &psPhysmemImportDmaBufOUT->uiAlign);
+ /* Exit early if bridged call fails */
+ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+ {
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psPhysmemImportDmaBufOUT->
+ hPMRPtr, (void *)psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _PhysmemImportDmaBufpsPMRPtrIntRelease);
+ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportDmaBuf_exit:
+
+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) /* on failure, drop the reference the import produced */
+ {
+ if (psPMRPtrInt)
+ {
+ LockHandle(KERNEL_HANDLE_BASE);
+ PMRUnrefPMR(psPMRPtrInt);
+ UnlockHandle(KERNEL_HANDLE_BASE);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPhysmemImportDmaBufOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer) /* only free heap allocations, not the in-buffer tail */
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData) /* handle-release callback: calls PMRUnrefUnlockPMR on the imported PMR */
+{
+ PVRSRV_ERROR eError;
+ eError = PMRUnrefUnlockPMR((PMR *) pvData);
+ return eError;
+}
+
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side bridge entry point for PhysmemImportDmaBufLocked.
+ * Unpacks the IN structure from the bridge buffer, copies the user-supplied
+ * annotation name into kernel space, calls PhysmemImportDmaBufLocked and, on
+ * success, wraps the returned PMR in a per-connection handle whose release
+ * callback is _PhysmemImportDmaBufLockedpsPMRPtrIntRelease.
+ * Always returns 0; the real status is reported via psPhysmemImportDmaBufLockedOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8,
+ IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN =
+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *)
+ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT =
+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *)
+ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0);
+
+ IMG_CHAR *uiNameInt = NULL;
+ PMR *psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total array-argument size is computed in 64-bit so an attacker-controlled
+ * ui32NameSize cannot overflow the addition before the range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Reject user-supplied name lengths beyond the annotation limit. */
+ if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+ {
+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN;
+
+ /* Arrays fit in the slack after the IN struct: no allocation needed. */
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Otherwise fall back to a heap buffer, freed at the end of the call. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+ }
+ }
+
+ if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName,
+ psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+ /* Force NUL termination so a non-terminated user string cannot
+ * cause overreads in the server. */
+ ((IMG_CHAR *)
+ uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psPhysmemImportDmaBufLockedOUT->eError =
+ PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection),
+ psPhysmemImportDmaBufLockedIN->ifd,
+ psPhysmemImportDmaBufLockedIN->uiFlags,
+ psPhysmemImportDmaBufLockedIN->ui32NameSize,
+ uiNameInt,
+ &psPMRPtrInt,
+ &psPhysmemImportDmaBufLockedOUT->uiSize,
+ &psPhysmemImportDmaBufLockedOUT->uiAlign);
+ /* Exit early if bridged call fails */
+ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK))
+ {
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psPhysmemImportDmaBufLockedOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _PhysmemImportDmaBufLockedpsPMRPtrIntRelease);
+ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PhysmemImportDmaBufLocked_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportDmaBufLocked_exit:
+
+ /* On any failure after the import succeeded, drop the PMR reference here;
+ * once handle creation succeeds, ownership passes to the handle base and
+ * the release callback does this instead. */
+ if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ LockHandle(KERNEL_HANDLE_BASE);
+ PMRUnrefUnlockPMR(psPMRPtrInt);
+ UnlockHandle(KERNEL_HANDLE_BASE);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Server-side bridge entry point for PhysmemExportDmaBuf.
+ * Looks up the caller's PMR handle, calls PhysmemExportDmaBuf to obtain a
+ * dma-buf fd in psPhysmemExportDmaBufOUT->iFd, then releases the handle
+ * reference taken by the lookup. Always returns 0; the real status is in
+ * psPhysmemExportDmaBufOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPhysmemExportDmaBufIN_UI8,
+ IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN =
+ (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT =
+ (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8,
+ 0);
+
+ IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPhysmemExportDmaBufOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PhysmemExportDmaBuf_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPhysmemExportDmaBufOUT->eError =
+ PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection),
+ psPMRInt, &psPhysmemExportDmaBufOUT->iFd);
+
+PhysmemExportDmaBuf_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Handle-release callback registered for PMR handles created by
+ * PVRSRVBridgePhysmemImportSparseDmaBuf: drops the bridge's PMR reference
+ * via PMRUnrefPMR when the handle is destroyed.
+ */
+static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PMRUnrefPMR((PMR *) pvData);
+ return eError;
+}
+
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+ "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Server-side bridge entry point for PhysmemImportSparseDmaBuf.
+ * Unpacks the IN structure, copies two user arrays into kernel space (the
+ * physical-chunk mapping table and the annotation name), calls
+ * PhysmemImportSparseDmaBuf, and on success wraps the returned PMR in a
+ * per-connection handle released via _PhysmemImportSparseDmaBufpsPMRPtrIntRelease.
+ * Always returns 0; the real status is in psPhysmemImportSparseDmaBufOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPhysmemImportSparseDmaBufIN_UI8,
+ IMG_UINT8 * psPhysmemImportSparseDmaBufOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN =
+ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *)
+ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT =
+ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *)
+ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0);
+
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiNameInt = NULL;
+ PMR *psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total array-argument size is accumulated in 64-bit so the two
+ * attacker-controlled counts cannot overflow before the range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Bound both user-supplied array sizes before any allocation. */
+ if (unlikely
+ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ if (unlikely(psPhysmemImportSparseDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN;
+
+ /* Arrays fit in the slack after the IN struct: no allocation needed. */
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Otherwise fall back to a heap buffer, freed at the end of the call. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+ }
+
+ /* Carve the mapping table out of the staging buffer. */
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+ {
+ ui32MappingTableInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32MappingTableInt,
+ (const void __user *)psPhysmemImportSparseDmaBufIN->pui32MappingTable,
+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) !=
+ PVRSRV_OK)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+ /* Carve the annotation-name buffer out of the staging buffer, after the table. */
+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiNameInt, (const void __user *)psPhysmemImportSparseDmaBufIN->puiName,
+ psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ /* Force NUL termination so a non-terminated user string cannot
+ * cause overreads in the server. */
+ ((IMG_CHAR *)
+ uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psPhysmemImportSparseDmaBufOUT->eError =
+ PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection),
+ psPhysmemImportSparseDmaBufIN->ifd,
+ psPhysmemImportSparseDmaBufIN->uiFlags,
+ psPhysmemImportSparseDmaBufIN->uiChunkSize,
+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks,
+ psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemImportSparseDmaBufIN->ui32NameSize,
+ uiNameInt,
+ &psPMRPtrInt,
+ &psPhysmemImportSparseDmaBufOUT->uiSize,
+ &psPhysmemImportSparseDmaBufOUT->uiAlign);
+ /* Exit early if bridged call fails */
+ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+ {
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psPhysmemImportSparseDmaBufOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psPhysmemImportSparseDmaBufOUT->hPMRPtr, (void *)psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _PhysmemImportSparseDmaBufpsPMRPtrIntRelease);
+ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+PhysmemImportSparseDmaBuf_exit:
+
+ /* On any failure after the import succeeded, drop the PMR reference here;
+ * once handle creation succeeds, the handle's release callback owns it. */
+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ LockHandle(KERNEL_HANDLE_BASE);
+ PMRUnrefPMR(psPMRPtrInt);
+ UnlockHandle(KERNEL_HANDLE_BASE);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPhysmemImportSparseDmaBufOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+void DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
+/*
+ * Register all DMABUF functions with services
+ *
+ * Registers the four DMABUF bridge entry points in the global dispatch
+ * table. The NULL final argument means no per-entry bridge lock is used
+ * for these calls.
+ */
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF,
+ PVRSRVBridgePhysmemImportDmaBuf, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED,
+ PVRSRVBridgePhysmemImportDmaBufLocked, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF,
+ PVRSRVBridgePhysmemExportDmaBuf, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF,
+ PVRSRVBridgePhysmemImportSparseDmaBuf, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dmabuf functions with services
+ */
+/*
+ * Unregister all dmabuf functions with services
+ *
+ * Mirror of InitDMABUFBridge: removes the four DMABUF entries from the
+ * global dispatch table.
+ */
+void DeinitDMABUFBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF,
+ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 * pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode);
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for htbuffer
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#include "htbserver.h"
+
+/* Direct (in-kernel) client bridge for HTBControl: bypasses the ioctl
+ * marshalling layer and calls HTBControlKM directly. hBridge is unused in
+ * the direct path.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 * pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError =
+ HTBControlKM(ui32NumGroups,
+ pui32GroupEnable, ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode);
+
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "htbuffer_types.h"
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0)
+
+/*******************************************
+ HTBControl
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+/* Marshalled input arguments for the HTBControl bridge call. __packed keeps
+ * the layout identical between the client and server sides of the bridge.
+ * pui32GroupEnable holds a userspace address of ui32NumGroups IMG_UINT32s;
+ * the server copies it with OSCopyFromUser before use. */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+ IMG_UINT32 *pui32GroupEnable;
+ IMG_UINT32 ui32EnablePID;
+ IMG_UINT32 ui32LogLevel;
+ IMG_UINT32 ui32LogMode;
+ IMG_UINT32 ui32NumGroups;
+ IMG_UINT32 ui32OpMode;
+} __packed PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+/* Marshalled output: only the server-side status code is returned. */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL;
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(HTB_FLAG_NUM_EL <= IMG_UINT32_MAX,
+ "HTB_FLAG_NUM_EL must not be larger than IMG_UINT32_MAX");
+
+/* Server-side bridge entry point for HTBControl.
+ * Copies the caller's group-enable array into kernel space and forwards all
+ * arguments to HTBControlKM. Always returns 0; the real status is in
+ * psHTBControlOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHTBControlIN_UI8,
+ IMG_UINT8 * psHTBControlOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN =
+ (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT =
+ (PVRSRV_BRIDGE_OUT_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0);
+
+ IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Size computed in 64-bit so the user-supplied count cannot overflow
+ * before the range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0;
+
+ /* Reject group counts beyond the host trace buffer's flag-element limit. */
+ if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL))
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto HTBControl_exit;
+ }
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto HTBControl_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBControlIN;
+
+ /* Array fits in the slack after the IN struct: no allocation needed. */
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Otherwise fall back to a heap buffer, freed at the end of the call. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBControl_exit;
+ }
+ }
+ }
+
+ if (psHTBControlIN->ui32NumGroups != 0)
+ {
+ ui32GroupEnableInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32GroupEnableInt,
+ (const void __user *)psHTBControlIN->pui32GroupEnable,
+ psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBControl_exit;
+ }
+ }
+
+ psHTBControlOUT->eError =
+ HTBControlKM(psHTBControlIN->ui32NumGroups,
+ ui32GroupEnableInt,
+ psHTBControlIN->ui32LogLevel,
+ psHTBControlIN->ui32EnablePID,
+ psHTBControlIN->ui32LogMode, psHTBControlIN->ui32OpMode);
+
+HTBControl_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psHTBControlOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pHTBUFFERBridgeLock;
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+void DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
+/*
+ * Register all HTBUFFER functions with services
+ *
+ * Creates the bridge serialisation lock, then registers the single HTBUFFER
+ * entry point with that lock so dispatched calls are serialised.
+ * Returns the OSLockCreate error if lock creation fails.
+ */
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL,
+ PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all htbuffer functions with services
+ */
+/*
+ * Unregister all htbuffer functions with services
+ *
+ * Mirror of InitHTBUFFERBridge: destroys the bridge lock and removes the
+ * HTBCONTROL entry from the dispatch table.
+ */
+void DeinitHTBUFFERBridge(void)
+{
+ OSLockDestroy(pHTBUFFERBridgeLock);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL);
+
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitHTBUFFERBridge() \
+ PVRSRV_OK
+
+#define DeinitHTBUFFERBridge()
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE * phPMRExport,
+ IMG_UINT64 * pui64Size,
+ IMG_UINT32 * pui32Log2Contig,
+ IMG_UINT64 * pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE * phPMR,
+ IMG_DEVMEM_SIZE_T * puiSize,
+ IMG_DEVMEM_ALIGN_T * puiAlign);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 * pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR * puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE * phPMRPtr,
+ IMG_UINT32 ui32PDumpFlags,
+ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE * phDevMemServerContext,
+ IMG_HANDLE * phPrivData,
+ IMG_UINT32 * pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE * phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE * phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 * pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 * pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT64 ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT64 ui64FBSCEntries);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 * pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 * pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR * puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR * puiHeapNameOut,
+ IMG_DEV_VIRTADDR * psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T * puiHeapLength,
+ IMG_DEVMEM_SIZE_T * puiReservedRegionLength,
+ IMG_UINT32 * pui32Log2DataPageSizeOut,
+ IMG_UINT32 * pui32Log2ImportAlignmentOut);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_BOOL bRegister);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP * peaPhysHeapID,
+ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge,
+ PVRSRV_PHYS_HEAP * peHeap);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR * psFaultAddress);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32ui32StatType,
+ IMG_PID ui32pid);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32VirtPageOffset);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_UINT32 ui32VirtPageOffset,
+ IMG_UINT32 ui32PageCount);
+
+#endif /* CLIENT_MM_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for mm
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem_typedefs.h"
+
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "process_stats.h"
+
+/* ---------------------------------------------------------------------------
+ * Direct (same address space) client bridge wrappers for the mm PMR calls.
+ * Each wrapper casts the opaque IMG_HANDLEs to their server-side types and
+ * calls the server implementation directly. Where hBridge is used it is
+ * cast to PVRSRV_DEVICE_NODE*; otherwise it is explicitly unreferenced.
+ * ------------------------------------------------------------------------ */
+
+/* Export a PMR. *phPMRExport is written back unconditionally, so it is
+ * NULL if PMRExportPMR fails. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE * phPMRExport,
+ IMG_UINT64 * pui64Size,
+ IMG_UINT32 * pui32Log2Contig,
+ IMG_UINT64 * pui64Password)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PMR_EXPORT *psPMRExportInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRExportPMR(psPMRInt, &psPMRExportInt, pui64Size, pui32Log2Contig, pui64Password);
+
+ *phPMRExport = psPMRExportInt;
+ return eError;
+}
+
+/* Release a PMR export previously created by BridgePMRExportPMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT *psPMRExportInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError = PMRUnexportPMR(psPMRExportInt);
+
+ return eError;
+}
+
+/* Query the unique identifier of a PMR into *pui64UID. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRGetUID(psPMRInt, pui64UID);
+
+ return eError;
+}
+
+/* Wrap a PMR in a local-import handle. *phExtMem is NULL on failure. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR *psBufferInt;
+ PMR *psExtMemInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psBufferInt = (PMR *) hBuffer;
+
+ eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+
+ *phExtMem = psExtMemInt;
+ return eError;
+}
+
+/* Release a local-import handle created above. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR *psExtMemInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtMemInt = (PMR *) hExtMem;
+
+ eError = PMRUnmakeLocalImportHandle(psExtMemInt);
+
+ return eError;
+}
+
+/* Import a previously exported PMR. Unlike most wrappers here, hBridge
+ * carries the device node and is passed through to the server call. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT *psPMRExportInt;
+ PMR *psPMRInt = NULL;
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError =
+ PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psPMRExportInt,
+ ui64uiPassword, ui64uiSize, ui32uiLog2Contig, &psPMRInt);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+/* Import a PMR via a local-import handle; also returns its size/alignment. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE * phPMR,
+ IMG_DEVMEM_SIZE_T * puiSize,
+ IMG_DEVMEM_ALIGN_T * puiAlign)
+{
+ PVRSRV_ERROR eError;
+ PMR *psExtHandleInt;
+ PMR *psPMRInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtHandleInt = (PMR *) hExtHandle;
+
+ eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, puiAlign);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+/* Drop a reference on a PMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRUnrefPMR(psPMRInt);
+
+ return eError;
+}
+
+/* Drop a reference on a PMR and unlock it (see PMRUnrefUnlockPMR). */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRUnrefUnlockPMR(psPMRInt);
+
+ return eError;
+}
+
+/* Allocate a new RAM-backed PMR on the device node carried by hBridge.
+ * *phPMRPtr receives the PMR (NULL on failure); *puiOutFlags receives
+ * the actual allocation flags chosen by the server. */
+IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 * pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR * puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE * phPMRPtr,
+ IMG_UINT32 ui32PDumpFlags,
+ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRPtrInt = NULL;
+
+ eError =
+ PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ uiSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32Log2PageSize,
+ uiFlags,
+ ui32AnnotationLength,
+ puiAnnotation,
+ ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+/* Create a device-memory context on the device node carried by hBridge.
+ * Returns the new context, its private data handle and the CPU cache
+ * line size via the out-parameters. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE * phDevMemServerContext,
+ IMG_HANDLE * phPrivData,
+ IMG_UINT32 * pui32CPUCacheLineSize)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+ eError =
+ DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ bbKernelMemoryCtx,
+ &psDevMemServerContextInt, &hPrivDataInt, pui32CPUCacheLineSize);
+
+ *phDevMemServerContext = psDevMemServerContextInt;
+ *phPrivData = hPrivDataInt;
+ return eError;
+}
+
+/* Destroy a device-memory context. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemServerContextInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError = DevmemIntCtxDestroy(psDevmemServerContextInt);
+
+ return eError;
+}
+
+/* Create a heap inside a device-memory context, selected by heap config
+ * and heap index. *phDevmemHeapPtr is NULL on failure. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE * phDevmemHeapPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+ DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntHeapCreate(psDevmemCtxInt,
+ ui32HeapConfigIndex,
+ ui32HeapIndex,
+ sHeapBaseAddr, ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+
+ *phDevmemHeapPtr = psDevmemHeapPtrInt;
+ return eError;
+}
+
+/* Destroy a device-memory heap. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemHeapInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+ eError = DevmemIntHeapDestroy(psDevmemHeapInt);
+
+ return eError;
+}
+
+/* Map a PMR into a previously reserved device-virtual range.
+ * *phMapping receives the mapping object (NULL on failure). */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE * phMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION *psReservationInt;
+ PMR *psPMRInt;
+ DEVMEMINT_MAPPING *psMappingInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPMR(psDevmemServerHeapInt,
+ psReservationInt, psPMRInt, uiMapFlags, &psMappingInt);
+
+ *phMapping = psMappingInt;
+ return eError;
+}
+
+/* Remove a mapping created by BridgeDevmemIntMapPMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING *psMappingInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+ eError = DevmemIntUnmapPMR(psMappingInt);
+
+ return eError;
+}
+
+/* Reserve a device-virtual address range in a heap. *phReservation is
+ * NULL on failure. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+ eError =
+ DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt);
+
+ *phReservation = psReservationInt;
+ return eError;
+}
+
+/* Release a reservation created by BridgeDevmemIntReserveRange. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError = DevmemIntUnreserveRange(psReservationInt);
+
+ return eError;
+}
+
+/* Alter the physical backing of a sparse allocation: allocate and/or free
+ * the listed page indices of the PMR backing hSrvDevMemHeap at sDevVAddr
+ * (and the CPU mapping at ui64CPUVAddr, per ui32SparseFlags). */
+IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 * pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 * pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psSrvDevMemHeapInt;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntChangeSparse(psSrvDevMemHeapInt,
+ psPMRInt,
+ ui32AllocPageCount,
+ pui32AllocPageIndices,
+ ui32FreePageCount,
+ pui32FreePageIndices,
+ ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr);
+
+ return eError;
+}
+
+/* Map ui32PageCount pages of a PMR (starting at ui32PhysicalPgOffset)
+ * into a reservation at sDevVAddr. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION *psReservationInt;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPages(psReservationInt,
+ psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr);
+
+ return eError;
+}
+
+/* Unmap ui32PageCount pages from a reservation starting at sDevVAddr. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount);
+
+ return eError;
+}
+
+/* Check whether sAddress is a valid device-virtual address in the given
+ * context; hBridge carries the device node. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemCtxInt, sAddress);
+
+ return eError;
+}
+
+/* Invalidate the given FBSC table entries. Only available when the
+ * driver is built with RGX_FEATURE_FBCDC; otherwise returns
+ * PVRSRV_ERROR_NOT_IMPLEMENTED. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT64 ui64FBSCEntries)
+{
+#if defined(RGX_FEATURE_FBCDC)
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hDevmemCtx);
+ PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Query the number of heap configurations on this device node. */
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 * pui32NumHeapConfigs)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ pui32NumHeapConfigs);
+
+ return eError;
+}
+
+/* Query the number of heaps in one heap configuration. */
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 * pui32NumHeaps)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32HeapConfigIndex, pui32NumHeaps);
+
+ return eError;
+}
+
+/* Copy the name of a heap configuration into the caller's buffer of
+ * ui32HeapConfigNameBufSz bytes. */
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR * puiHeapConfigName)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32HeapConfigIndex, ui32HeapConfigNameBufSz, puiHeapConfigName);
+
+ return eError;
+}
+
+/* Query full details (name, base, lengths, page size, alignment) of one
+ * heap in one heap configuration. */
+IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR * puiHeapNameOut,
+ IMG_DEV_VIRTADDR * psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T * puiHeapLength,
+ IMG_DEVMEM_SIZE_T * puiReservedRegionLength,
+ IMG_UINT32 * pui32Log2DataPageSizeOut,
+ IMG_UINT32 * pui32Log2ImportAlignmentOut)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32HeapConfigIndex,
+ ui32HeapIndex,
+ ui32HeapNameBufSz,
+ puiHeapNameOut,
+ psDevVAddrBase,
+ puiHeapLength,
+ puiReservedRegionLength,
+ pui32Log2DataPageSizeOut, pui32Log2ImportAlignmentOut);
+
+ return eError;
+}
+
+/* Register (or unregister, per bRegister) the caller for page-fault
+ * notifications on a device-memory context. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError = DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, bRegister);
+
+ return eError;
+}
+
+/* Fill pasapPhysHeapMemStats with memory statistics for each of the
+ * ui32PhysHeapCount heap IDs listed in peaPhysHeapID. */
+IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP * peaPhysHeapID,
+ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ PVRSRVPhysHeapGetMemInfoKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats);
+
+ return eError;
+}
+
+/* Query the default physical heap of the device node into *peHeap. */
+IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge,
+ PVRSRV_PHYS_HEAP * peHeap)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ PVRSRVGetDefaultPhysicalHeapKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), peHeap);
+
+ return eError;
+}
+
+/* Query the device-virtual address of the last recorded page fault for
+ * the given context into *psFaultAddress. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR * psFaultAddress)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntGetFaultAddress(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemCtxInt, psFaultAddress);
+
+ return eError;
+}
+
+/* Record an out-of-memory statistic event of type ui32ui32StatType for
+ * process ui32pid on the device node carried by hBridge. Compiled out
+ * (returns PVRSRV_ERROR_NOT_IMPLEMENTED) unless the driver is built with
+ * PVRSRV_ENABLE_PROCESS_STATS. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVStatsUpdateOOMStat(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32ui32StatType,
+ IMG_PID ui32pid)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRV_ERROR eError;
+
+ eError =
+ PVRSRVStatsUpdateOOMStat(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32ui32StatType, ui32pid);
+
+ return eError;
+#else
+ /* Fix: hBridge was not marked unreferenced in this branch, unlike the
+  * sibling BridgeDevmemInvalidateFBSCTable, causing an unused-parameter
+  * warning when PVRSRV_ENABLE_PROCESS_STATS is undefined. */
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(ui32ui32StatType);
+ PVR_UNREFERENCED_PARAMETER(ui32pid);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* DevmemX variant: reserve a device-virtual range; yields a
+ * DEVMEMXINT_RESERVATION (distinct from DEVMEMINT_RESERVATION).
+ * *phReservation is NULL on failure. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE * phReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt;
+ DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+ eError =
+ DevmemXIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt);
+
+ *phReservation = psReservationInt;
+ return eError;
+}
+
+/* Release a DevmemX reservation. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+
+ eError = DevmemXIntUnreserveRange(psReservationInt);
+
+ return eError;
+}
+
+/* Map ui32PageCount PMR pages (from ui32PhysPageOffset) into a DevmemX
+ * reservation at virtual page offset ui32VirtPageOffset. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32VirtPageOffset)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemXIntMapPages(psReservationInt,
+ psPMRInt,
+ ui32PageCount, ui32PhysPageOffset, uiFlags, ui32VirtPageOffset);
+
+ return eError;
+}
+
+/* Unmap ui32PageCount pages from a DevmemX reservation starting at
+ * virtual page offset ui32VirtPageOffset. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemXIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_UINT32 ui32VirtPageOffset,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMXINT_RESERVATION *psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMXINT_RESERVATION *) hReservation;
+
+ eError = DevmemXIntUnmapPages(psReservationInt, ui32VirtPageOffset, ui32PageCount);
+
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+33
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+34
+#define PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+35
+#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+35)
+
+/*******************************************
+ PMRExportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+ IMG_UINT64 ui64Password;
+ IMG_UINT64 ui64Size;
+ IMG_HANDLE hPMRExport;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Log2Contig;
+} __packed PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+/*******************************************
+ PMRUnexportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+} __packed PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+/*******************************************
+ PMRGetUID
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+ IMG_UINT64 ui64UID;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+/*******************************************
+ PMRMakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hBuffer;
+} __packed PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+ PMRUnmakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+} __packed PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+/*******************************************
+ PMRImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+ IMG_UINT64 ui64uiPassword;
+ IMG_UINT64 ui64uiSize;
+ IMG_HANDLE hPMRExport;
+ IMG_UINT32 ui32uiLog2Contig;
+} __packed PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+/*******************************************
+ PMRLocalImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_HANDLE hExtHandle;
+} __packed PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+/*******************************************
+ PMRUnrefPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+/*******************************************
+ PMRUnrefUnlockPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+/*******************************************
+ PhysmemNewRamBackedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 *pui32MappingTable;
+ const IMG_CHAR *puiAnnotation;
+ IMG_UINT32 ui32AnnotationLength;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_PID ui32PID;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ PVRSRV_ERROR eError;
+ PVRSRV_MEMALLOCFLAGS_T uiOutFlags;
+} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+/*******************************************
+ DevmemIntCtxCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_BOOL bbKernelMemoryCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_HANDLE hDevMemServerContext;
+ IMG_HANDLE hPrivData;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CPUCacheLineSize;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+/*******************************************
+ DevmemIntCtxDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+ IMG_HANDLE hDevmemServerContext;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+/*******************************************
+ DevmemIntHeapCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_DEV_VIRTADDR sHeapBaseAddr;
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapIndex;
+ IMG_UINT32 ui32Log2DataPageSize;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_HANDLE hDevmemHeapPtr;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+/*******************************************
+ DevmemIntHeapDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+ IMG_HANDLE hDevmemHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+/*******************************************
+ DevmemIntMapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hDevmemServerHeap;
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hReservation;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+/*******************************************
+ DevmemIntUnmapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+/*******************************************
+ DevmemIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_HANDLE hDevmemServerHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+/*******************************************
+ DevmemIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+/*******************************************
+ ChangeSparseMem
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT64 ui64CPUVAddr;
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hSrvDevMemHeap;
+ IMG_UINT32 *pui32AllocPageIndices;
+ IMG_UINT32 *pui32FreePageIndices;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 ui32SparseFlags;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+/*******************************************
+ DevmemIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32PhysicalPgOffset;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+/*******************************************
+ DevmemIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+/*******************************************
+ DevmemIsVDevAddrValid
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+/*******************************************
+ DevmemInvalidateFBSCTable
+ *******************************************/
+
+/* Bridge in structure for DevmemInvalidateFBSCTable */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG
+{
+ IMG_UINT64 ui64FBSCEntries;
+ IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE;
+
+/* Bridge out structure for DevmemInvalidateFBSCTable */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE;
+
+/*******************************************
+ HeapCfgHeapConfigCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumHeapConfigs;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+/*******************************************
+ HeapCfgHeapCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumHeaps;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+/*******************************************
+ HeapCfgHeapConfigName
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+ IMG_CHAR *puiHeapConfigName;
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapConfigNameBufSz;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+ IMG_CHAR *puiHeapConfigName;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+/*******************************************
+ HeapCfgHeapDetails
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_CHAR *puiHeapNameOut;
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapIndex;
+ IMG_UINT32 ui32HeapNameBufSz;
+} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_DEVMEM_SIZE_T uiReservedRegionLength;
+ IMG_CHAR *puiHeapNameOut;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Log2DataPageSizeOut;
+ IMG_UINT32 ui32Log2ImportAlignmentOut;
+} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+/*******************************************
+ DevmemIntRegisterPFNotifyKM
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_BOOL bRegister;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/*******************************************
+ PhysHeapGetMemInfo
+ *******************************************/
+
+/* Bridge in structure for PhysHeapGetMemInfo */
+typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG
+{
+ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+ PVRSRV_PHYS_HEAP *peaPhysHeapID;
+ IMG_UINT32 ui32PhysHeapCount;
+} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO;
+
+/* Bridge out structure for PhysHeapGetMemInfo */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG
+{
+ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO;
+
+/*******************************************
+ GetDefaultPhysicalHeap
+ *******************************************/
+
+/* Bridge in structure for GetDefaultPhysicalHeap */
+typedef struct PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP;
+
+/* Bridge out structure for GetDefaultPhysicalHeap */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_PHYS_HEAP eHeap;
+} __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP;
+
+/*******************************************
+ DevmemGetFaultAddress
+ *******************************************/
+
+/* Bridge in structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS;
+
+/* Bridge out structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG
+{
+ IMG_DEV_VIRTADDR sFaultAddress;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS;
+
+/*******************************************
+ PVRSRVStatsUpdateOOMStat
+ *******************************************/
+
+/* Bridge in structure for PVRSRVStatsUpdateOOMStat */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT_TAG
+{
+ IMG_PID ui32pid;
+ IMG_UINT32 ui32ui32StatType;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT;
+
+/* Bridge out structure for PVRSRVStatsUpdateOOMStat */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT;
+
+/*******************************************
+ DevmemXIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE_TAG
+{
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_HANDLE hDevmemServerHeap;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE;
+
+/* Bridge out structure for DevmemXIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE;
+
+/*******************************************
+ DevmemXIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemXIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE;
+
+/*******************************************
+ DevmemXIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32PhysPageOffset;
+ IMG_UINT32 ui32VirtPageOffset;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES;
+
+/* Bridge out structure for DevmemXIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES;
+
+/*******************************************
+ DevmemXIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemXIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32VirtPageOffset;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemXIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES;
+
+#endif /* COMMON_MM_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pvrsrv_memalloc_physheap.h"
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "process_stats.h"
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ return PVRSRV_OK;
+}
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PMRUnexportPMR((PMR_EXPORT *) pvData);
+ return eError;
+}
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRExportPMRIN_UI8,
+ IMG_UINT8 * psPMRExportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN =
+ (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT =
+ (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+ PMR *psPMRInt = NULL;
+ PMR_EXPORT *psPMRExportInt = NULL;
+ IMG_HANDLE hPMRExportInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRExportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRExportPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRExportPMROUT->eError =
+ PMRExportPMR(psPMRInt,
+ &psPMRExportInt,
+ &psPMRExportPMROUT->ui64Size,
+ &psPMRExportPMROUT->ui32Log2Contig, &psPMRExportPMROUT->ui64Password);
+ /* Exit early if bridged call fails */
+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+ {
+ goto PMRExportPMR_exit;
+ }
+
+ /*
+ * For cases where we need a cross process handle we actually allocate two.
+ *
+ * The first one is a connection specific handle and it gets given the real
+ * release function. This handle does *NOT* get returned to the caller. It's
+ * purpose is to release any leaked resources when we either have a bad or
+ * abnormally terminated client. If we didn't do this then the resource
+ * wouldn't be freed until driver unload. If the resource is freed normally,
+ * this handle can be looked up via the cross process handle and then
+ * released accordingly.
+ *
+ * The second one is a cross process handle and it gets given a noop release
+ * function. This handle does get returned to the caller.
+ */
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psPMRExportPMROUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &hPMRExportInt, (void *)psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) & _PMRExportPMRpsPMRExportIntRelease);
+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto PMRExportPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Lock over handle creation. */
+ LockHandle(KERNEL_HANDLE_BASE);
+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+ &psPMRExportPMROUT->hPMRExport,
+ (void *)psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ ReleasePMRExport);
+ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(KERNEL_HANDLE_BASE);
+ goto PMRExportPMR_exit;
+ }
+ /* Release now we have created handles. */
+ UnlockHandle(KERNEL_HANDLE_BASE);
+
+PMRExportPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRExportPMROUT->hPMRExport)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Lock over handle creation cleanup. */
+ LockHandle(KERNEL_HANDLE_BASE);
+
+ eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE,
+ (IMG_HANDLE) psPMRExportPMROUT->
+ hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle(KERNEL_HANDLE_BASE);
+
+ }
+
+ if (hPMRExportInt)
+ {
+ PVRSRV_ERROR eError;
+ /* Lock over handle creation cleanup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ eError =
+ PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase->
+ psHandleBase, hPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psPMRExportInt = NULL;
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ }
+
+ if (psPMRExportInt)
+ {
+ LockHandle(KERNEL_HANDLE_BASE);
+ PMRUnexportPMR(psPMRExportInt);
+ UnlockHandle(KERNEL_HANDLE_BASE);
+ }
+ }
+
+ return 0;
+}
+
+/* Bridge entry point for PMRUnexportPMR.
+ *
+ * Tears down a cross-process PMR export in three steps:
+ *   1. look up (without referencing) the PMR_EXPORT data behind the
+ *      kernel-wide handle supplied by the client;
+ *   2. find and destroy the connection-specific alias handle for the same
+ *      data - destroying it runs the data's real release function (see the
+ *      export-side allocation for details);
+ *   3. destroy the kernel-wide handle itself.
+ *
+ * Always returns 0; the PVRSRV status travels in psPMRUnexportPMROUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			   IMG_UINT8 * psPMRUnexportPMRIN_UI8,
+			   IMG_UINT8 * psPMRUnexportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0);
+
+	PMR_EXPORT *psPMRExportInt = NULL;
+	IMG_HANDLE hPMRExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psPMRExportInt,
+				       (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+				       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_FALSE);
+	if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+		PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+		/* Fix: previously execution fell through here with
+		 * psPMRExportInt == NULL when PVR_ASSERT was compiled out
+		 * (PVRSRV_NEED_PVR_ASSERT disabled); bail out instead. */
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRUnexportPMR_exit;
+	}
+	/* Release now we have looked up the export data. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				     &hPMRExportInt,
+				     psPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+		PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+		/* Fix: do not attempt to destroy a handle we failed to find. */
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRUnexportPMR_exit;
+	}
+
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					      hPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	/* KERNEL_CCB_FULL / RETRY are transient and not treated as failures here. */
+	if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+		     (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+	}
+	PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+		   (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+	/* Release now we have destroyed the alias handle. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Lock over handle destruction. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	psPMRUnexportPMROUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE,
+					      (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+					      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+		     (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError)));
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRUnexportPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+PMRUnexportPMR_exit:
+
+	return 0;
+}
+
+/* Bridge entry for PMRGetUID: resolves the client's PMR handle (taking a
+ * lookup reference), queries the PMR's 64-bit unique ID into the OUT
+ * message, then drops the reference. Always returns 0; the PVRSRV status
+ * is carried in psPMRGetUIDOUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+		      IMG_UINT8 * psPMRGetUIDIN_UI8,
+		      IMG_UINT8 * psPMRGetUIDOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN =
+	    (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT =
+	    (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, 0);
+
+	IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle (IMG_TRUE: take a reference,
+	 * released in the cleanup path below). */
+	psPMRGetUIDOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRGetUID_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID);
+
+PMRGetUID_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgePMRMakeLocalImportHandle:
+ * undoes PMRMakeLocalImportHandle for the wrapped PMR when the handle dies. */
+static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData)
+{
+	return PMRUnmakeLocalImportHandle((PMR *) pvData);
+}
+
+/* Bridge entry for PMRMakeLocalImportHandle: wraps an exported buffer PMR
+ * as a DEVMEM_MEM_IMPORT handle in the per-process handle base so another
+ * API within the same process can import it. On any failure after the
+ * wrap succeeded, the wrap is undone before returning. Always returns 0;
+ * status is carried in psPMRMakeLocalImportHandleOUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psPMRMakeLocalImportHandleIN_UI8,
+				     IMG_UINT8 * psPMRMakeLocalImportHandleOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN =
+	    (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *)
+	    IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT =
+	    (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *)
+	    IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0);
+
+	IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+	PMR *psBufferInt = NULL;
+	PMR *psExtMemInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRMakeLocalImportHandleOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psBufferInt,
+				       hBuffer,
+				       PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, IMG_TRUE);
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRMakeLocalImportHandle_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psPMRMakeLocalImportHandleOUT->eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Lock over handle creation. Note the new handle lives in the
+	 * per-process handle base, not the per-connection one. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRMakeLocalImportHandleOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				      &psPMRMakeLocalImportHandleOUT->hExtMem, (void *)psExtMemInt,
+				      PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      _PMRMakeLocalImportHandlepsExtMemIntRelease);
+	if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRMakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+PMRMakeLocalImportHandle_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psBufferInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hBuffer, PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* On failure, undo the wrap so the resource is not leaked. */
+	if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+	{
+		if (psExtMemInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnmakeLocalImportHandle(psExtMemInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+/* Bridge entry for PMRUnmakeLocalImportHandle: destroys the per-process
+ * DEVMEM_MEM_IMPORT handle created by PMRMakeLocalImportHandle (the
+ * handle's release callback performs the actual unmake). Always returns 0;
+ * status is carried in psPMRUnmakeLocalImportHandleOUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+				       IMG_UINT8 * psPMRUnmakeLocalImportHandleIN_UI8,
+				       IMG_UINT8 * psPMRUnmakeLocalImportHandleOUT_UI8,
+				       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN =
+	    (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *)
+	    IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT =
+	    (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *)
+	    IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRUnmakeLocalImportHandleOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					      (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem,
+					      PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	/* KERNEL_CCB_FULL / RETRY are transient, not logged as errors. */
+	if (unlikely((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) &&
+		     (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->eError)));
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRUnmakeLocalImportHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+PMRUnmakeLocalImportHandle_exit:
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgePMRImportPMR: drops the
+ * PMR reference taken by the import when the handle is destroyed. */
+static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData)
+{
+	return PMRUnrefPMR((PMR *) pvData);
+}
+
+/* Bridge entry for PMRImportPMR: imports a cross-process PMR export.
+ * The export handle lives in the kernel-wide handle base; the resulting
+ * PMR handle is created in the caller's per-connection base. The password
+ * / size / log2-contiguity values from the IN message are validated by
+ * PhysmemImportPMR. On failure after import, the new PMR is unreferenced.
+ * Always returns 0; status travels in psPMRImportPMROUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			 IMG_UINT8 * psPMRImportPMRIN_UI8,
+			 IMG_UINT8 * psPMRImportPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0);
+
+	IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+	PMR_EXPORT *psPMRExportInt = NULL;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup (exports live in the kernel-wide base). */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Look up the address from the handle */
+	psPMRImportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psPMRExportInt,
+				       hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_TRUE);
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto PMRImportPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	psPMRImportPMROUT->eError =
+	    PhysmemImportPMR(psConnection, OSGetDevNode(psConnection),
+			     psPMRExportInt,
+			     psPMRImportPMRIN->ui64uiPassword,
+			     psPMRImportPMRIN->ui64uiSize,
+			     psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		goto PMRImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							      &psPMRImportPMROUT->hPMR,
+							      (void *)psPMRInt,
+							      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+							      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							      (PFN_HANDLE_RELEASE) &
+							      _PMRImportPMRpsPMRIntRelease);
+	if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+PMRImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Unreference the previously looked up handle */
+	if (psPMRExportInt)
+	{
+		PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					    hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	/* On failure, drop the reference on the freshly imported PMR. */
+	if (psPMRImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgePMRLocalImportPMR:
+ * drops the PMR reference taken by the local import. */
+static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData)
+{
+	return PMRUnrefPMR((PMR *) pvData);
+}
+
+/* Bridge entry for PMRLocalImportPMR: turns a per-process DEVMEM_MEM_IMPORT
+ * handle (made by PMRMakeLocalImportHandle) into a per-connection PMR
+ * handle, also returning the PMR's size and alignment. On failure after
+ * import, the new PMR reference is dropped. Always returns 0; status
+ * travels in psPMRLocalImportPMROUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+			      IMG_UINT8 * psPMRLocalImportPMRIN_UI8,
+			      IMG_UINT8 * psPMRLocalImportPMROUT_UI8,
+			      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0);
+
+	IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+	PMR *psExtHandleInt = NULL;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup (import handles live in the per-process base). */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psPMRLocalImportPMROUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				       (void **)&psExtHandleInt,
+				       hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, IMG_TRUE);
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto PMRLocalImportPMR_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psPMRLocalImportPMROUT->eError =
+	    PMRLocalImportPMR(psExtHandleInt,
+			      &psPMRInt,
+			      &psPMRLocalImportPMROUT->uiSize, &psPMRLocalImportPMROUT->uiAlign);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								   &psPMRLocalImportPMROUT->hPMR,
+								   (void *)psPMRInt,
+								   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+								   PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								   (PFN_HANDLE_RELEASE) &
+								   _PMRLocalImportPMRpsPMRIntRelease);
+	if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRLocalImportPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+PMRLocalImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psExtHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					    hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* On failure, drop the reference taken by PMRLocalImportPMR. */
+	if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	return 0;
+}
+
+/* Bridge entry for PMRUnrefPMR: destroys the caller's PMR handle; the
+ * handle's release callback drops the underlying PMR reference. Always
+ * returns 0; status travels in psPMRUnrefPMROUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+			IMG_UINT8 * psPMRUnrefPMRIN_UI8,
+			IMG_UINT8 * psPMRUnrefPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRUnrefPMROUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+					      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	/* KERNEL_CCB_FULL / RETRY are transient, not logged as errors. */
+	if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+		     (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefPMROUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRUnrefPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+PMRUnrefPMR_exit:
+
+	return 0;
+}
+
+/* Bridge entry for PMRUnrefUnlockPMR: identical handle-destruction flow to
+ * PMRUnrefPMR (the distinct bridge op exists for the unref+unlock server
+ * semantics carried by the handle's release path). Always returns 0;
+ * status travels in psPMRUnrefUnlockPMROUT->eError. */
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+			      IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8,
+			      IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8,
+			      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN =
+	    (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPMRUnrefUnlockPMROUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR,
+					      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	/* KERNEL_CCB_FULL / RETRY are transient, not logged as errors. */
+	if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) &&
+		     (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto PMRUnrefUnlockPMR_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+PMRUnrefUnlockPMR_exit:
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgePhysmemNewRamBackedPMR:
+ * drops the reference on the newly created PMR when its handle dies. */
+static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData)
+{
+	return PMRUnrefPMR((PMR *) pvData);
+}
+
+/* The bounds below are used to validate the client-supplied array lengths
+ * in PVRSRVBridgePhysmemNewRamBackedPMR; they must fit in IMG_UINT32 so
+ * the 32-bit buffer-size arithmetic cannot overflow. */
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+	      "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+	      "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry for PhysmemNewRamBackedPMR: copies the mapping table and
+ * annotation arrays in from user space (bounds-checked against
+ * PMR_MAX_SUPPORTED_4K_PAGE_COUNT / DEVMEM_ANNOTATION_MAX_LEN), then
+ * creates a RAM-backed PMR and returns a per-connection handle to it.
+ * Array staging reuses the tail of the bridge IN buffer when it fits,
+ * otherwise falls back to a temporary allocation. Always returns 0;
+ * status travels in psPhysmemNewRamBackedPMROUT->eError. */
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+				   IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8,
+				   IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN =
+	    (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *)
+	    IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT =
+	    (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *)
+	    IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0);
+
+	IMG_UINT32 *ui32MappingTableInt = NULL;
+	IMG_CHAR *uiAnnotationInt = NULL;
+	PMR *psPMRPtrInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	/* Total staging size is computed in 64 bits to catch overflow before
+	 * narrowing to the 32-bit size actually allocated. */
+	IMG_UINT32 ui32BufferSize = 0;
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+	    ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0;
+
+	/* Reject client-supplied array lengths above the compile-time bounds. */
+	if (unlikely
+	    (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+	{
+		psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	if (unlikely(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN))
+	{
+		psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			/* Stage arrays in the unused tail of the IN message - no allocation. */
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysmemNewRamBackedPMR_exit;
+			}
+		}
+	}
+
+	if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+	{
+		ui32MappingTableInt =
+		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32MappingTableInt,
+		     (const void __user *)psPhysmemNewRamBackedPMRIN->pui32MappingTable,
+		     psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) !=
+		    PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedPMR_exit;
+		}
+	}
+	if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+	{
+		uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiAnnotationInt,
+		     (const void __user *)psPhysmemNewRamBackedPMRIN->puiAnnotation,
+		     psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) !=
+		    PVRSRV_OK)
+		{
+			psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysmemNewRamBackedPMR_exit;
+		}
+		/* Force NUL termination - user space is not trusted to provide it. */
+		((IMG_CHAR *)
+		 uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength *
+				   sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psPhysmemNewRamBackedPMROUT->eError =
+	    PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection),
+				   psPhysmemNewRamBackedPMRIN->uiSize,
+				   psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+				   psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+				   ui32MappingTableInt,
+				   psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+				   psPhysmemNewRamBackedPMRIN->uiFlags,
+				   psPhysmemNewRamBackedPMRIN->ui32AnnotationLength,
+				   uiAnnotationInt,
+				   psPhysmemNewRamBackedPMRIN->ui32PID,
+				   &psPMRPtrInt,
+				   psPhysmemNewRamBackedPMRIN->ui32PDumpFlags,
+				   &psPhysmemNewRamBackedPMROUT->uiOutFlags);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+	{
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+									&psPhysmemNewRamBackedPMROUT->
+									hPMRPtr,
+									(void *)psPMRPtrInt,
+									PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+									PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+									(PFN_HANDLE_RELEASE) &
+									_PhysmemNewRamBackedPMRpsPMRPtrIntRelease);
+	if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto PhysmemNewRamBackedPMR_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+PhysmemNewRamBackedPMR_exit:
+
+	/* On failure, drop the reference on any PMR we managed to create. */
+	if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+	{
+		if (psPMRPtrInt)
+		{
+			LockHandle(KERNEL_HANDLE_BASE);
+			PMRUnrefPMR(psPMRPtrInt);
+			UnlockHandle(KERNEL_HANDLE_BASE);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psPhysmemNewRamBackedPMROUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when we fell back to a real allocation. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgeDevmemIntCtxCreate:
+ * destroys the server-side device-memory context when its handle dies. */
+static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData)
+{
+	return DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData);
+}
+
+/* Bridge entry for DevmemIntCtxCreate: creates a server device-memory
+ * context plus its private-data sub-handle. If sub-handle creation fails,
+ * the main handle is destroyed (which also destroys the context via its
+ * release callback); only if no handle was created is the context
+ * destroyed directly. Always returns 0; status travels in
+ * psDevmemIntCtxCreateOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+			       IMG_UINT8 * psDevmemIntCtxCreateIN_UI8,
+			       IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8,
+			       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8,
+								     0);
+
+	DEVMEMINT_CTX *psDevMemServerContextInt = NULL;
+	IMG_HANDLE hPrivDataInt = NULL;
+
+	/* NULL so the error path can tell whether the handle was created. */
+	psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+	psDevmemIntCtxCreateOUT->eError =
+	    DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection),
+			       psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+			       &psDevMemServerContextInt,
+			       &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								    &psDevmemIntCtxCreateOUT->
+								    hDevMemServerContext,
+								    (void *)
+								    psDevMemServerContextInt,
+								    PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+								    PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								    (PFN_HANDLE_RELEASE) &
+								    _DevmemIntCtxCreatepsDevMemServerContextIntRelease);
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Sub-handle: its lifetime is tied to the context handle above. */
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+								       &psDevmemIntCtxCreateOUT->
+								       hPrivData,
+								       (void *)hPrivDataInt,
+								       PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+								       PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								       psDevmemIntCtxCreateOUT->
+								       hDevMemServerContext);
+	if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntCtxCreate_exit:
+
+	if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psHandleBase);
+
+			eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+							     (IMG_HANDLE) psDevmemIntCtxCreateOUT->
+							     hDevMemServerContext,
+							     PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+			if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psDevMemServerContextInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psHandleBase);
+
+		}
+
+		/* Reached only when the context exists but no handle was created. */
+		if (psDevMemServerContextInt)
+		{
+			DevmemIntCtxDestroy(psDevMemServerContextInt);
+		}
+	}
+
+	return 0;
+}
+
+/* Bridge entry for DevmemIntCtxDestroy: destroys the DEVMEMINT_CTX handle;
+ * the handle's release callback destroys the context itself. Always
+ * returns 0; status travels in psDevmemIntCtxDestroyOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+				IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8,
+				IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8,
+				CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8,
+								     0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8,
+								      0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntCtxDestroyOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psDevmemIntCtxDestroyIN->
+					      hDevmemServerContext,
+					      PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	/* KERNEL_CCB_FULL / RETRY are transient, not logged as errors. */
+	if (unlikely
+	    ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK)
+	     && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+	     && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntCtxDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntCtxDestroy_exit:
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgeDevmemIntHeapCreate:
+ * destroys the server-side heap when its handle dies. */
+static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData)
+{
+	return DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData);
+}
+
+/* Bridge entry for DevmemIntHeapCreate: looks up the device-memory context,
+ * creates a heap within it (config/index, base address and page size taken
+ * from the IN message) and returns a handle to the heap. On failure after
+ * creation, the heap is destroyed. Always returns 0; status travels in
+ * psDevmemIntHeapCreateOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+				IMG_UINT8 * psDevmemIntHeapCreateIN_UI8,
+				IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8,
+				CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8,
+								     0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8,
+								      0);
+
+	IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+	DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle (IMG_TRUE: take a reference,
+	 * released in the cleanup path below). */
+	psDevmemIntHeapCreateOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapCreate_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapCreateOUT->eError =
+	    DevmemIntHeapCreate(psDevmemCtxInt,
+				psDevmemIntHeapCreateIN->ui32HeapConfigIndex,
+				psDevmemIntHeapCreateIN->ui32HeapIndex,
+				psDevmemIntHeapCreateIN->sHeapBaseAddr,
+				psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								     &psDevmemIntHeapCreateOUT->
+								     hDevmemHeapPtr,
+								     (void *)psDevmemHeapPtrInt,
+								     PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+								     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								     (PFN_HANDLE_RELEASE) &
+								     _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease);
+	if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapCreate_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntHeapCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* On failure, destroy the heap we created (no handle owns it yet). */
+	if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+	{
+		if (psDevmemHeapPtrInt)
+		{
+			DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+		}
+	}
+
+	return 0;
+}
+
+/* Bridge entry for DevmemIntHeapDestroy: destroys the DEVMEMINT_HEAP
+ * handle; the handle's release callback destroys the heap itself. Always
+ * returns 0; status travels in psDevmemIntHeapDestroyOUT->eError. */
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8,
+				 IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *)
+	    IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemIntHeapDestroyOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+					      PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	/* KERNEL_CCB_FULL / RETRY are transient, not logged as errors. */
+	if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) &&
+		     (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntHeapDestroy_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntHeapDestroy_exit:
+
+	return 0;
+}
+
+/* Handle-release callback registered by PVRSRVBridgeDevmemIntMapPMR:
+ * unmaps the PMR mapping when its handle dies. */
+static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData)
+{
+	return DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData);
+}
+
+/*
+ * Bridge handler for DevmemIntMapPMR.
+ *
+ * Looks up the heap, reservation and PMR objects from the caller's handles
+ * (each lookup takes a reference that is dropped in the exit path), maps the
+ * PMR into the reservation via DevmemIntMapPMR(), then wraps the resulting
+ * DEVMEMINT_MAPPING in a new handle whose release callback performs the
+ * unmap.  If anything fails after the mapping was created, the mapping is
+ * unmapped before returning.  Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntMapPMRIN_UI8,
+ IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0);
+
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+ IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+ PMR *psPMRInt = NULL;
+ DEVMEMINT_MAPPING *psMappingInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIntMapPMROUT->eError =
+ DevmemIntMapPMR(psDevmemServerHeapInt,
+ psReservationInt,
+ psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+ {
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* MULTI allocation: the same mapping object may be wrapped by more than
+ * one handle; the release callback unmaps when a handle is destroyed. */
+ psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psDevmemIntMapPMROUT->hMapping,
+ (void *)psMappingInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _DevmemIntMapPMRpsMappingIntRelease);
+ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntMapPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemServerHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* If handle creation failed after a successful map, undo the mapping so
+ * it is not leaked (no handle owns it at this point). */
+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ if (psMappingInt)
+ {
+ DevmemIntUnmapPMR(psMappingInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge handler for DevmemIntUnmapPMR.
+ *
+ * Destroys the caller's DEVMEMINT_MAPPING handle; the actual unmap happens
+ * through the handle's release callback (_DevmemIntMapPMRpsMappingIntRelease).
+ * KERNEL_CCB_FULL and RETRY are expected retryable results and not logged.
+ * Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8,
+ IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psDevmemIntUnmapPMROUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+ if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntUnmapPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnmapPMR_exit:
+
+ return 0;
+}
+
+/*
+ * Handle-release callback for DEVMEMINT_RESERVATION handles created by
+ * PVRSRVBridgeDevmemIntReserveRange: unreserves the range on destruction.
+ */
+static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData);
+ return eError;
+}
+
+/*
+ * Bridge handler for DevmemIntReserveRange.
+ *
+ * Looks up the heap handle (taking a reference, dropped in the exit path),
+ * reserves [sAddress, sAddress + uiLength) via DevmemIntReserveRange(), and
+ * wraps the new DEVMEMINT_RESERVATION in a handle whose release callback
+ * performs the unreserve.  If handle creation fails after a successful
+ * reserve, the reservation is undone so it is not leaked.  Always returns 0;
+ * status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntReserveRangeIN_UI8,
+ IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIntReserveRangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntReserveRange_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIntReserveRangeOUT->eError =
+ DevmemIntReserveRange(psDevmemServerHeapInt,
+ psDevmemIntReserveRangeIN->sAddress,
+ psDevmemIntReserveRangeIN->uiLength, &psReservationInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+ {
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psDevmemIntReserveRangeOUT->
+ hReservation,
+ (void *)psReservationInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _DevmemIntReserveRangepsReservationIntRelease);
+ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntReserveRange_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemServerHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Undo the reservation if no handle was created to own it. */
+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ if (psReservationInt)
+ {
+ DevmemIntUnreserveRange(psReservationInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge handler for DevmemIntUnreserveRange.
+ *
+ * Destroys the caller's DEVMEMINT_RESERVATION handle; the actual unreserve
+ * is done by the handle's release callback.  KERNEL_CCB_FULL and RETRY are
+ * expected retryable results and not logged.  Always returns 0; status is
+ * in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8,
+ IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *)
+ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psDevmemIntUnreserveRangeOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnreserveRangeIN->
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ if (unlikely
+ ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK)
+ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntUnreserveRange_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+DevmemIntUnreserveRange_exit:
+
+ return 0;
+}
+
+/* Bounds used below to validate the alloc/free page-index array sizes.
+ * NOTE(review): the assertion is emitted twice by the bridge generator
+ * (once per array argument); the duplication is harmless. */
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+	      "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+static_assert(PMR_MAX_SUPPORTED_4K_PAGE_COUNT <= IMG_UINT32_MAX,
+	      "PMR_MAX_SUPPORTED_4K_PAGE_COUNT must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler for ChangeSparseMem.
+ *
+ * Validates the user-supplied alloc/free page counts against
+ * PMR_MAX_SUPPORTED_4K_PAGE_COUNT, sizes a kernel staging buffer in 64-bit
+ * arithmetic (rejecting anything over IMG_UINT32_MAX to avoid truncation),
+ * copies both page-index arrays in from user space, looks up the heap and
+ * PMR handles, then calls DevmemIntChangeSparse().  The staging buffer
+ * reuses the tail of the bridge input buffer when it fits; otherwise it is
+ * heap-allocated and freed at exit.  Always returns 0; status is in
+ * OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psChangeSparseMemIN_UI8,
+ IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN =
+ (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT =
+ (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0);
+
+ IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+ DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL;
+ IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total staging size computed in 64 bits so the sum cannot wrap. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0;
+
+ if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto ChangeSparseMem_exit;
+ }
+
+ if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_4K_PAGE_COUNT))
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto ChangeSparseMem_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto ChangeSparseMem_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psChangeSparseMemIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ChangeSparseMem_exit;
+ }
+ }
+ }
+
+ if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32AllocPageIndicesInt,
+ (const void __user *)psChangeSparseMemIN->pui32AllocPageIndices,
+ psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+ if (psChangeSparseMemIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32FreePageIndicesInt,
+ (const void __user *)psChangeSparseMemIN->pui32FreePageIndices,
+ psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSrvDevMemHeapInt,
+ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto ChangeSparseMem_exit;
+ }
+
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto ChangeSparseMem_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psChangeSparseMemOUT->eError =
+ DevmemIntChangeSparse(psSrvDevMemHeapInt,
+ psPMRInt,
+ psChangeSparseMemIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psChangeSparseMemIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psChangeSparseMemIN->ui32SparseFlags,
+ psChangeSparseMemIN->uiFlags,
+ psChangeSparseMemIN->sDevVAddr,
+ psChangeSparseMemIN->ui64CPUVAddr);
+
+ChangeSparseMem_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSrvDevMemHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psChangeSparseMemOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to a heap allocation (not the IN-buffer tail). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge handler for DevmemIntMapPages.
+ *
+ * Looks up the reservation and PMR handles (references dropped in the exit
+ * path) and maps ui32PageCount pages of the PMR, starting at
+ * ui32PhysicalPgOffset, at sDevVAddr with uiFlags.  No new handle is
+ * created; the mapping lives within the existing reservation.  Always
+ * returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntMapPagesIN_UI8,
+ IMG_UINT8 * psDevmemIntMapPagesOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0);
+
+ IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPages_exit;
+ }
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntMapPages_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIntMapPagesOUT->eError =
+ DevmemIntMapPages(psReservationInt,
+ psPMRInt,
+ psDevmemIntMapPagesIN->ui32PageCount,
+ psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+ psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr);
+
+DevmemIntMapPages_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Bridge handler for DevmemIntUnmapPages.
+ *
+ * Looks up the reservation handle (reference dropped in the exit path) and
+ * unmaps ui32PageCount pages starting at sDevVAddr.  The reservation handle
+ * itself stays valid.  Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8,
+ IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8,
+ 0);
+
+ IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIntUnmapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntUnmapPages_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIntUnmapPagesOUT->eError =
+ DevmemIntUnmapPages(psReservationInt,
+ psDevmemIntUnmapPagesIN->sDevVAddr,
+ psDevmemIntUnmapPagesIN->ui32PageCount);
+
+DevmemIntUnmapPages_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Bridge handler for DevmemIsVDevAddrValid.
+ *
+ * Looks up the device-memory context handle (reference dropped in the exit
+ * path) and asks DevmemIntIsVDevAddrValid() whether sAddress is valid in
+ * that context on the connection's device node.  Always returns 0; status
+ * is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8,
+ IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *)
+ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *)
+ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+ DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIsVDevAddrValidOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemCtxInt,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIsVDevAddrValid_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIsVDevAddrValidOUT->eError =
+ DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection),
+ psDevmemCtxInt, psDevmemIsVDevAddrValidIN->sAddress);
+
+DevmemIsVDevAddrValid_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#if defined(RGX_FEATURE_FBCDC)
+
+/*
+ * Bridge handler for DevmemInvalidateFBSCTable (only built when the GPU has
+ * the FBCDC feature; otherwise the dispatch-table entry is NULL, see below).
+ *
+ * Looks up the device-memory context handle (reference dropped in the exit
+ * path) and invalidates the FBSC entries given by the ui64FBSCEntries mask.
+ * Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemInvalidateFBSCTableIN_UI8,
+ IMG_UINT8 * psDevmemInvalidateFBSCTableOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *)
+ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *)
+ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx;
+ DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemInvalidateFBSCTableOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemCtxInt,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemInvalidateFBSCTable_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemInvalidateFBSCTableOUT->eError =
+ DevmemIntInvalidateFBSCTable(psDevmemCtxInt,
+ psDevmemInvalidateFBSCTableIN->ui64FBSCEntries);
+
+DevmemInvalidateFBSCTable_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#else
+/* Feature absent: NULL entry makes the bridge reject this call. */
+#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL
+#endif
+
+/*
+ * Bridge handler for HeapCfgHeapConfigCount.
+ *
+ * Returns the number of heap configurations for the connection's device
+ * node.  Takes no meaningful input (IN is unreferenced) and needs no
+ * handle locking.  Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8,
+ IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN =
+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *)
+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT =
+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *)
+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+ psHeapCfgHeapConfigCountOUT->eError =
+ HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection),
+ &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+ return 0;
+}
+
+/*
+ * Bridge handler for HeapCfgHeapCount.
+ *
+ * Returns the number of heaps in the heap configuration selected by
+ * ui32HeapConfigIndex.  Needs no handle locking.  Always returns 0;
+ * status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHeapCfgHeapCountIN_UI8,
+ IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN =
+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT =
+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0);
+
+ psHeapCfgHeapCountOUT->eError =
+ HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection),
+ psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+ &psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+ return 0;
+}
+
+/* Bound used below to validate the user-requested name buffer size. */
+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX,
+	      "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler for HeapCfgHeapConfigName.
+ *
+ * Retrieves the name of the heap configuration at ui32HeapConfigIndex into
+ * a kernel staging buffer (input-buffer tail reused when it fits, otherwise
+ * heap-allocated) and copies it out to the caller's user buffer.  The
+ * requested buffer size is capped at DEVMEM_HEAPNAME_MAXLENGTH and the
+ * total staging size is computed in 64 bits to avoid wrap.  Always returns
+ * 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8,
+ IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN =
+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *)
+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT =
+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *)
+ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0);
+
+ IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size computation so the multiply cannot wrap. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH)
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto HeapCfgHeapConfigName_exit;
+ }
+
+ /* Echo the caller's user-space destination pointer back in OUT. */
+ psHeapCfgHeapConfigNameOUT->puiHeapConfigName =
+ psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto HeapCfgHeapConfigName_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+ {
+ puiHeapConfigNameInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR);
+ }
+
+ psHeapCfgHeapConfigNameOUT->eError =
+ HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection),
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+ puiHeapConfigNameInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psHeapCfgHeapConfigNameOUT->eError != PVRSRV_OK))
+ {
+ goto HeapCfgHeapConfigName_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((puiHeapConfigNameInt) &&
+ ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psHeapCfgHeapConfigNameOUT->puiHeapConfigName,
+ puiHeapConfigNameInt,
+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) !=
+ PVRSRV_OK))
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+
+HeapCfgHeapConfigName_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psHeapCfgHeapConfigNameOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to a heap allocation (not the IN-buffer tail). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bound used below to validate the user-requested name buffer size. */
+static_assert(DEVMEM_HEAPNAME_MAXLENGTH <= IMG_UINT32_MAX,
+	      "DEVMEM_HEAPNAME_MAXLENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler for HeapCfgHeapDetails.
+ *
+ * Retrieves details for heap ui32HeapIndex of configuration
+ * ui32HeapConfigIndex: name (staged in kernel memory, then copied out to
+ * the caller's buffer), base device-virtual address, length, reserved
+ * region length, and the log2 data-page size and import alignment.  The
+ * name buffer size is capped at DEVMEM_HEAPNAME_MAXLENGTH; the staging
+ * size is computed in 64 bits to avoid wrap.  Always returns 0; status is
+ * in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8,
+ IMG_UINT8 * psHeapCfgHeapDetailsOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN =
+ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT =
+ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8,
+ 0);
+
+ IMG_CHAR *puiHeapNameOutInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size computation so the multiply cannot wrap. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0;
+
+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH)
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto HeapCfgHeapDetails_exit;
+ }
+
+ /* Echo the caller's user-space destination pointer back in OUT. */
+ psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto HeapCfgHeapDetails_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+ {
+ puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR);
+ }
+
+ psHeapCfgHeapDetailsOUT->eError =
+ HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection),
+ psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+ puiHeapNameOutInt,
+ &psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+ &psHeapCfgHeapDetailsOUT->uiHeapLength,
+ &psHeapCfgHeapDetailsOUT->uiReservedRegionLength,
+ &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+ &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut);
+ /* Exit early if bridged call fails */
+ if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK))
+ {
+ goto HeapCfgHeapDetails_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((puiHeapNameOutInt) &&
+ ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut,
+ puiHeapNameOutInt,
+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK))
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+
+HeapCfgHeapDetails_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to a heap allocation (not the IN-buffer tail). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge dispatcher for DevmemIntRegisterPFNotifyKM.
+ * Looks up the DEVMEMINT_CTX behind the caller-supplied handle, then asks the
+ * server to register or unregister (selected by bRegister) this connection for
+ * page-fault notification on that context.  Per bridge convention the
+ * PVRSRV_ERROR result is returned to the caller via psOUT->eError and the
+ * IMG_INT return value is always 0.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+					IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8,
+					IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8,
+					CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *)
+	    IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *)
+	    IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0);
+
+	IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle (takes a reference on success) */
+	psDevmemIntRegisterPFNotifyKMOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+	if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemIntRegisterPFNotifyKM_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemIntRegisterPFNotifyKMOUT->eError =
+	    DevmemIntRegisterPFNotifyKM(psDevmemCtxInt, psDevmemIntRegisterPFNotifyKMIN->bRegister);
+
+DevmemIntRegisterPFNotifyKM_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle (non-NULL only if the
+	 * lookup above succeeded) */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Compile-time guard: PVRSRVBridgePhysHeapGetMemInfo below compares a 32-bit
+ * count against PVRSRV_PHYS_HEAP_LAST, so the enum bound must fit in 32 bits. */
+static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX,
+	      "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge dispatcher for PhysHeapGetMemInfo.
+ * Marshals an array of PVRSRV_PHYS_HEAP IDs in from user space, calls
+ * PVRSRVPhysHeapGetMemInfoKM to fill a parallel array of PHYS_HEAP_MEM_STATS,
+ * and copies the stats back out.  Scratch space for both arrays is either
+ * carved out of the slack at the end of the bridge input buffer (when it
+ * fits) or heap-allocated; the exit path frees it only in the latter case.
+ */
+static IMG_INT
+PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry,
+			       IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8,
+			       IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8,
+			       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN =
+	    (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT =
+	    (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8,
+								     0);
+
+	PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL;
+	PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	/* Total size is summed in 64 bits so an attacker-controlled count
+	 * cannot overflow 32-bit arithmetic; range-checked before narrowing. */
+	IMG_UINT32 ui32BufferSize = 0;
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) +
+	    ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) +
+	    0;
+
+	/* Reject counts beyond the number of physical heap IDs that exist. */
+	if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST))
+	{
+		psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto PhysHeapGetMemInfo_exit;
+	}
+
+	/* Echo the user-space destination pointer into the OUT structure so
+	 * the copy-out below has it even though IN/OUT may be distinct. */
+	psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats =
+	    psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats;
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto PhysHeapGetMemInfo_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto PhysHeapGetMemInfo_exit;
+			}
+		}
+	}
+
+	/* Sub-allocate the heap-ID array from the scratch buffer. */
+	if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0)
+	{
+		eaPhysHeapIDInt =
+		    (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP);
+	}
+
+	/* Copy the data over */
+	if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, eaPhysHeapIDInt,
+		     (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID,
+		     psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) !=
+		    PVRSRV_OK)
+		{
+			psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysHeapGetMemInfo_exit;
+		}
+	}
+	/* Sub-allocate the stats output array after the heap-ID array. */
+	if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0)
+	{
+		pasapPhysHeapMemStatsInt =
+		    (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS);
+	}
+
+	psPhysHeapGetMemInfoOUT->eError =
+	    PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection),
+				       psPhysHeapGetMemInfoIN->ui32PhysHeapCount,
+				       eaPhysHeapIDInt, pasapPhysHeapMemStatsInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK))
+	{
+		goto PhysHeapGetMemInfo_exit;
+	}
+
+	/* If dest ptr is non-null and we have data to copy */
+	if ((pasapPhysHeapMemStatsInt) &&
+	    ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0))
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats,
+		      pasapPhysHeapMemStatsInt,
+		      (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) !=
+		     PVRSRV_OK))
+		{
+			psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto PhysHeapGetMemInfo_exit;
+		}
+	}
+
+PhysHeapGetMemInfo_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free if we fell back to a heap allocation; the in-buffer slack
+	 * case (bHaveEnoughSpace) must not be freed. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for GetDefaultPhysicalHeap.
+ * No input payload (IN struct deliberately unreferenced); simply queries the
+ * device node's default physical heap ID into psOUT->eHeap.
+ */
+static IMG_INT
+PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry,
+				   IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8,
+				   IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN =
+	    (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *)
+	    IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT =
+	    (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *)
+	    IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0);
+
+	PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN);
+
+	psGetDefaultPhysicalHeapOUT->eError =
+	    PVRSRVGetDefaultPhysicalHeapKM(psConnection, OSGetDevNode(psConnection),
+					   &psGetDefaultPhysicalHeapOUT->eHeap);
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for DevmemGetFaultAddress.
+ * Resolves the DEVMEMINT_CTX handle and retrieves the device-virtual address
+ * of the most recent page fault for that context into psOUT->sFaultAddress.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry,
+				  IMG_UINT8 * psDevmemGetFaultAddressIN_UI8,
+				  IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8,
+				  CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *)
+	    IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *)
+	    IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0);
+
+	IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx;
+	DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle (takes a reference on success) */
+	psDevmemGetFaultAddressOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemCtxInt,
+				       hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+	if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemGetFaultAddress_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemGetFaultAddressOUT->eError =
+	    DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection),
+				     psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress);
+
+DevmemGetFaultAddress_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+
+/*
+ * Bridge dispatcher for PVRSRVStatsUpdateOOMStat.
+ * Forwards an out-of-memory statistic update (stat type + pid) to the
+ * process-stats module.  When process stats are compiled out, the dispatch
+ * entry is replaced by NULL via the #else branch below.
+ */
+static IMG_INT
+PVRSRVBridgePVRSRVStatsUpdateOOMStat(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psPVRSRVStatsUpdateOOMStatIN_UI8,
+				     IMG_UINT8 * psPVRSRVStatsUpdateOOMStatOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatIN =
+	    (PVRSRV_BRIDGE_IN_PVRSRVSTATSUPDATEOOMSTAT *)
+	    IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *psPVRSRVStatsUpdateOOMStatOUT =
+	    (PVRSRV_BRIDGE_OUT_PVRSRVSTATSUPDATEOOMSTAT *)
+	    IMG_OFFSET_ADDR(psPVRSRVStatsUpdateOOMStatOUT_UI8, 0);
+
+	psPVRSRVStatsUpdateOOMStatOUT->eError =
+	    PVRSRVStatsUpdateOOMStat(psConnection, OSGetDevNode(psConnection),
+				     psPVRSRVStatsUpdateOOMStatIN->ui32ui32StatType,
+				     psPVRSRVStatsUpdateOOMStatIN->ui32pid);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgePVRSRVStatsUpdateOOMStat NULL
+#endif
+
+/* Handle-release callback: invoked by the handle manager when the last
+ * reference to a DEVMEMXINT_RESERVATION handle is dropped; undoes the range
+ * reservation created by PVRSRVBridgeDevmemXIntReserveRange. */
+static PVRSRV_ERROR _DevmemXIntReserveRangepsReservationIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = DevmemXIntUnreserveRange((DEVMEMXINT_RESERVATION *) pvData);
+	return eError;
+}
+
+/*
+ * Bridge dispatcher for DevmemXIntReserveRange.
+ * Looks up the server heap handle, reserves a device-virtual address range on
+ * it, and wraps the resulting DEVMEMXINT_RESERVATION in a new client handle
+ * whose release callback unreserves the range.  On any failure after a
+ * successful reserve, the range is unreserved directly in the exit path.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemXIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+				   IMG_UINT8 * psDevmemXIntReserveRangeIN_UI8,
+				   IMG_UINT8 * psDevmemXIntReserveRangeOUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMXINTRESERVERANGE *)
+	    IMG_OFFSET_ADDR(psDevmemXIntReserveRangeIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *psDevmemXIntReserveRangeOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMXINTRESERVERANGE *)
+	    IMG_OFFSET_ADDR(psDevmemXIntReserveRangeOUT_UI8, 0);
+
+	IMG_HANDLE hDevmemServerHeap = psDevmemXIntReserveRangeIN->hDevmemServerHeap;
+	DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL;
+	DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemXIntReserveRangeOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psDevmemServerHeapInt,
+				       hDevmemServerHeap,
+				       PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE);
+	if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntReserveRange_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemXIntReserveRangeOUT->eError =
+	    DevmemXIntReserveRange(psDevmemServerHeapInt,
+				   psDevmemXIntReserveRangeIN->sAddress,
+				   psDevmemXIntReserveRangeIN->uiLength, &psReservationInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		goto DevmemXIntReserveRange_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* On success, ownership of psReservationInt transfers to the handle;
+	 * the release callback will unreserve it when the handle dies. */
+	psDevmemXIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+									&psDevmemXIntReserveRangeOUT->
+									hReservation,
+									(void *)psReservationInt,
+									PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION,
+									PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+									(PFN_HANDLE_RELEASE) &
+									_DevmemXIntReserveRangepsReservationIntRelease);
+	if (unlikely(psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntReserveRange_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemXIntReserveRange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psDevmemServerHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Error path: the reservation was made but no handle owns it, so it
+	 * must be unreserved here to avoid leaking the VA range. */
+	if (psDevmemXIntReserveRangeOUT->eError != PVRSRV_OK)
+	{
+		if (psReservationInt)
+		{
+			DevmemXIntUnreserveRange(psReservationInt);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for DevmemXIntUnreserveRange.
+ * Destroys the DEVMEMXINT_RESERVATION handle; the actual unreserve happens in
+ * the handle's release callback.  KERNEL_CCB_FULL and RETRY results are passed
+ * back without an error print, since the caller is expected to retry those.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemXIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psDevmemXIntUnreserveRangeIN_UI8,
+				     IMG_UINT8 * psDevmemXIntUnreserveRangeOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMXINTUNRESERVERANGE *)
+	    IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *psDevmemXIntUnreserveRangeOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNRESERVERANGE *)
+	    IMG_OFFSET_ADDR(psDevmemXIntUnreserveRangeOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psDevmemXIntUnreserveRangeOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psDevmemXIntUnreserveRangeIN->
+					      hReservation,
+					      PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
+	if (unlikely
+	    ((psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_OK)
+	     && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+	     && (psDevmemXIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psDevmemXIntUnreserveRangeOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntUnreserveRange_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+DevmemXIntUnreserveRange_exit:
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for DevmemXIntMapPages.
+ * Looks up both the reservation and PMR handles, then maps ui32PageCount
+ * physical pages (starting at ui32PhysPageOffset in the PMR) into the
+ * reservation at ui32VirtPageOffset with the supplied mapping flags.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemXIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+			       IMG_UINT8 * psDevmemXIntMapPagesIN_UI8,
+			       IMG_UINT8 * psDevmemXIntMapPagesOUT_UI8,
+			       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *psDevmemXIntMapPagesOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMXINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntMapPagesOUT_UI8,
+								     0);
+
+	IMG_HANDLE hReservation = psDevmemXIntMapPagesIN->hReservation;
+	DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+	IMG_HANDLE hPMR = psDevmemXIntMapPagesIN->hPMR;
+	PMR *psPMRInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemXIntMapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE);
+	if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntMapPages_exit;
+	}
+
+	/* Look up the address from the handle */
+	psDevmemXIntMapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psPMRInt,
+				       hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+	if (unlikely(psDevmemXIntMapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntMapPages_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemXIntMapPagesOUT->eError =
+	    DevmemXIntMapPages(psReservationInt,
+			       psPMRInt,
+			       psDevmemXIntMapPagesIN->ui32PageCount,
+			       psDevmemXIntMapPagesIN->ui32PhysPageOffset,
+			       psDevmemXIntMapPagesIN->uiFlags,
+			       psDevmemXIntMapPagesIN->ui32VirtPageOffset);
+
+DevmemXIntMapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
+	}
+
+	/* Unreference the previously looked up handle */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for DevmemXIntUnmapPages.
+ * Looks up the reservation handle and unmaps ui32PageCount pages starting at
+ * ui32VirtPageOffset within it; the reservation itself stays alive.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemXIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psDevmemXIntUnmapPagesIN_UI8,
+				 IMG_UINT8 * psDevmemXIntUnmapPagesOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesIN =
+	    (PVRSRV_BRIDGE_IN_DEVMEMXINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *psDevmemXIntUnmapPagesOUT =
+	    (PVRSRV_BRIDGE_OUT_DEVMEMXINTUNMAPPAGES *)
+	    IMG_OFFSET_ADDR(psDevmemXIntUnmapPagesOUT_UI8, 0);
+
+	IMG_HANDLE hReservation = psDevmemXIntUnmapPagesIN->hReservation;
+	DEVMEMXINT_RESERVATION *psReservationInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psDevmemXIntUnmapPagesOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psReservationInt,
+				       hReservation,
+				       PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION, IMG_TRUE);
+	if (unlikely(psDevmemXIntUnmapPagesOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto DevmemXIntUnmapPages_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psDevmemXIntUnmapPagesOUT->eError =
+	    DevmemXIntUnmapPages(psReservationInt,
+				 psDevmemXIntUnmapPagesIN->ui32VirtPageOffset,
+				 psDevmemXIntUnmapPagesIN->ui32PageCount);
+
+DevmemXIntUnmapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hReservation,
+					    PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitMMBridge(void);
+void DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+/*
+ * Register every MM bridge entry point in the global dispatch table, keyed by
+ * (PVRSRV_BRIDGE_MM, function id).  The final NULL is presumably the optional
+ * per-entry bridge lock (none used here) — TODO confirm against
+ * SetDispatchTableEntry's declaration.  Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR,
+			      PVRSRVBridgePMRExportPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR,
+			      PVRSRVBridgePMRUnexportPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+			      NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE,
+			      PVRSRVBridgePMRMakeLocalImportHandle, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE,
+			      PVRSRVBridgePMRUnmakeLocalImportHandle, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR,
+			      PVRSRVBridgePMRImportPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR,
+			      PVRSRVBridgePMRLocalImportPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR,
+			      PVRSRVBridgePMRUnrefPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR,
+			      PVRSRVBridgePMRUnrefUnlockPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR,
+			      PVRSRVBridgePhysmemNewRamBackedPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE,
+			      PVRSRVBridgeDevmemIntCtxCreate, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY,
+			      PVRSRVBridgeDevmemIntCtxDestroy, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE,
+			      PVRSRVBridgeDevmemIntHeapCreate, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY,
+			      PVRSRVBridgeDevmemIntHeapDestroy, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR,
+			      PVRSRVBridgeDevmemIntMapPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR,
+			      PVRSRVBridgeDevmemIntUnmapPMR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE,
+			      PVRSRVBridgeDevmemIntReserveRange, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE,
+			      PVRSRVBridgeDevmemIntUnreserveRange, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM,
+			      PVRSRVBridgeChangeSparseMem, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES,
+			      PVRSRVBridgeDevmemIntMapPages, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES,
+			      PVRSRVBridgeDevmemIntUnmapPages, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID,
+			      PVRSRVBridgeDevmemIsVDevAddrValid, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE,
+			      PVRSRVBridgeDevmemInvalidateFBSCTable, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT,
+			      PVRSRVBridgeHeapCfgHeapConfigCount, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT,
+			      PVRSRVBridgeHeapCfgHeapCount, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME,
+			      PVRSRVBridgeHeapCfgHeapConfigName, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS,
+			      PVRSRVBridgeHeapCfgHeapDetails, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM,
+			      PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO,
+			      PVRSRVBridgePhysHeapGetMemInfo, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP,
+			      PVRSRVBridgeGetDefaultPhysicalHeap, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS,
+			      PVRSRVBridgeDevmemGetFaultAddress, NULL);
+
+	/* May be NULL when PVRSRV_ENABLE_PROCESS_STATS is not defined. */
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT,
+			      PVRSRVBridgePVRSRVStatsUpdateOOMStat, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE,
+			      PVRSRVBridgeDevmemXIntReserveRange, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE,
+			      PVRSRVBridgeDevmemXIntUnreserveRange, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES,
+			      PVRSRVBridgeDevmemXIntMapPages, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES,
+			      PVRSRVBridgeDevmemXIntUnmapPages, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all mm functions with services
+ */
+/*
+ * Unregister every MM bridge entry point; mirrors InitMMBridge one-for-one so
+ * any entry added there must also be removed here.
+ */
+void DeinitMMBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVSTATSUPDATEOOMSTAT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNRESERVERANGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTMAPPAGES);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMXINTUNMAPPAGES);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMP_BRIDGE_H
+#define CLIENT_PDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR * puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 * pui32FBCClearColour,
+ PDUMP_FBC_SWIZZLE eeFBCSwizzle,
+ IMG_DEV_VIRTADDR sHeaderDevAddr,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32CommentSize,
+ IMG_CHAR * puiComment, IMG_UINT32 ui32Flags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge, IMG_UINT32 ui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpDataDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR * puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32HeaderType,
+ IMG_UINT32 ui32ElementType,
+ IMG_UINT32 ui32ElementCount,
+ IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_PDUMP_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for pdump
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+/* Direct (in-kernel) client stub for PDumpImageDescriptor.
+ * Forwards the call straight to the server implementation in the same
+ * address space. hBridge carries a PVRSRV_DEVICE_NODE pointer (see cast
+ * below); NULL is passed for the connection as no client connection
+ * exists on this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR * puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 * pui32FBCClearColour,
+ PDUMP_FBC_SWIZZLE eeFBCSwizzle,
+ IMG_DEV_VIRTADDR sHeaderDevAddr,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+
+ /* In the direct bridge the handle already is the server-side object. */
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntPDumpImageDescriptor(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemCtxInt,
+ ui32StringSize,
+ puiFileName,
+ sDataDevAddr,
+ ui32DataSize,
+ ui32LogicalWidth,
+ ui32LogicalHeight,
+ ui32PhysicalWidth,
+ ui32PhysicalHeight,
+ ePixelFormat,
+ eMemLayout,
+ eFBCompression,
+ pui32FBCClearColour,
+ eeFBCSwizzle,
+ sHeaderDevAddr, ui32HeaderSize, ui32PDumpFlags);
+
+ return eError;
+}
+
+/* Direct (in-kernel) client stub for PVRSRVPDumpComment: forwards the
+ * comment buffer to PDumpCommentKM on the device node carried in hBridge.
+ * No connection data exists on this path (NULL connection). */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32CommentSize,
+ IMG_CHAR * puiComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ PDumpCommentKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32CommentSize, puiComment, ui32Flags);
+
+ return eError;
+}
+
+/* Direct (in-kernel) client stub for PVRSRVPDumpSetFrame: forwards the
+ * frame number to PDumpSetFrameKM on the device node carried in hBridge. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge, IMG_UINT32 ui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PDumpSetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32Frame);
+
+ return eError;
+}
+
+/* Direct (in-kernel) client stub for PDumpDataDescriptor.
+ * Mirrors BridgePDumpImageDescriptor: casts hBridge to the device node,
+ * treats hDevmemCtx as the server-side context object, and calls the
+ * server implementation with a NULL connection. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpDataDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR * puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32HeaderType,
+ IMG_UINT32 ui32ElementType,
+ IMG_UINT32 ui32ElementCount,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtxInt;
+
+ /* In the direct bridge the handle already is the server-side object. */
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntPDumpDataDescriptor(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemCtxInt,
+ ui32StringSize,
+ puiFileName,
+ sDataDevAddr,
+ ui32DataSize,
+ ui32HeaderType,
+ ui32ElementType, ui32ElementCount, ui32PDumpFlags);
+
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
+
+/*******************************************
+ PDumpImageDescriptor
+ *******************************************/
+
+/* NOTE: every IN/OUT structure below is __packed and is marshalled across
+ * the user/kernel bridge; member order and sizes are part of the bridge
+ * ABI and must not be changed by hand (these files are generated). */
+
+/* Bridge in structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR_TAG
+{
+ IMG_DEV_VIRTADDR sDataDevAddr;
+ IMG_DEV_VIRTADDR sHeaderDevAddr;
+ IMG_HANDLE hDevmemCtx;
+ const IMG_UINT32 *pui32FBCClearColour;
+ const IMG_CHAR *puiFileName;
+ IMG_FB_COMPRESSION eFBCompression;
+ IMG_MEMLAYOUT eMemLayout;
+ PDUMP_PIXEL_FORMAT ePixelFormat;
+ PDUMP_FBC_SWIZZLE eeFBCSwizzle;
+ IMG_UINT32 ui32DataSize;
+ IMG_UINT32 ui32HeaderSize;
+ IMG_UINT32 ui32LogicalHeight;
+ IMG_UINT32 ui32LogicalWidth;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32PhysicalHeight;
+ IMG_UINT32 ui32PhysicalWidth;
+ IMG_UINT32 ui32StringSize;
+} __packed PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR;
+
+/* Bridge out structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR;
+
+/*******************************************
+ PVRSRVPDumpComment
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+ IMG_CHAR *puiComment;
+ IMG_UINT32 ui32CommentSize;
+ IMG_UINT32 ui32Flags;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+/*******************************************
+ PVRSRVPDumpSetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+ IMG_UINT32 ui32Frame;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+/*******************************************
+ PDumpDataDescriptor
+ *******************************************/
+
+/* Bridge in structure for PDumpDataDescriptor */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR_TAG
+{
+ IMG_DEV_VIRTADDR sDataDevAddr;
+ IMG_HANDLE hDevmemCtx;
+ const IMG_CHAR *puiFileName;
+ IMG_UINT32 ui32DataSize;
+ IMG_UINT32 ui32ElementCount;
+ IMG_UINT32 ui32ElementType;
+ IMG_UINT32 ui32HeaderType;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32StringSize;
+} __packed PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR;
+
+/* Bridge out structure for PDumpDataDescriptor */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR;
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+ "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(4 <= IMG_UINT32_MAX, "4 must not be larger than IMG_UINT32_MAX");
+
+/* Server-side dispatch handler for PDumpImageDescriptor.
+ * Unmarshals the bridge IN structure, copies the variable-length filename
+ * (bounded by PVRSRV_PDUMP_MAX_FILENAME_SIZE) and the fixed 4-word FBC
+ * clear colour from user space, resolves the devmem-context handle, then
+ * calls DevmemIntPDumpImageDescriptor. Always returns 0; the operation
+ * status is reported through the OUT structure's eError field. */
+static IMG_INT
+PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPDumpImageDescriptorIN_UI8,
+ IMG_UINT8 * psPDumpImageDescriptorOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorIN =
+ (PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpImageDescriptorIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorOUT =
+ (PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *)
+ IMG_OFFSET_ADDR(psPDumpImageDescriptorOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemCtx = psPDumpImageDescriptorIN->hDevmemCtx;
+ DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+ IMG_UINT32 *ui32FBCClearColourInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Payload size computed in 64 bits so the sum cannot wrap before the
+  * IMG_UINT32_MAX range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) 4 * sizeof(IMG_UINT32)) + 0;
+
+ if (unlikely(psPDumpImageDescriptorIN->ui32StringSize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PDumpImageDescriptor_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PDumpImageDescriptor_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPDumpImageDescriptorIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Not enough slack in the bridge input buffer: fall back to a
+  * heap allocation, freed at the end of this function. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PDumpImageDescriptor_exit;
+ }
+ }
+ }
+
+ if (psPDumpImageDescriptorIN->ui32StringSize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFileNameInt,
+ (const void __user *)psPDumpImageDescriptorIN->puiFileName,
+ psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PDumpImageDescriptor_exit;
+ }
+ /* Force NUL-termination of the user-supplied filename. */
+ ((IMG_CHAR *)
+ uiFileNameInt)[(psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Clear colour is always a fixed 4-element IMG_UINT32 array. */
+ {
+ ui32FBCClearColourInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += 4 * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (4 * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32FBCClearColourInt,
+ (const void __user *)psPDumpImageDescriptorIN->pui32FBCClearColour,
+ 4 * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PDumpImageDescriptor_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPDumpImageDescriptorOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemCtxInt,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ if (unlikely(psPDumpImageDescriptorOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PDumpImageDescriptor_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPDumpImageDescriptorOUT->eError =
+ DevmemIntPDumpImageDescriptor(psConnection, OSGetDevNode(psConnection),
+ psDevmemCtxInt,
+ psPDumpImageDescriptorIN->ui32StringSize,
+ uiFileNameInt,
+ psPDumpImageDescriptorIN->sDataDevAddr,
+ psPDumpImageDescriptorIN->ui32DataSize,
+ psPDumpImageDescriptorIN->ui32LogicalWidth,
+ psPDumpImageDescriptorIN->ui32LogicalHeight,
+ psPDumpImageDescriptorIN->ui32PhysicalWidth,
+ psPDumpImageDescriptorIN->ui32PhysicalHeight,
+ psPDumpImageDescriptorIN->ePixelFormat,
+ psPDumpImageDescriptorIN->eMemLayout,
+ psPDumpImageDescriptorIN->eFBCompression,
+ ui32FBCClearColourInt,
+ psPDumpImageDescriptorIN->eeFBCSwizzle,
+ psPDumpImageDescriptorIN->sHeaderDevAddr,
+ psPDumpImageDescriptorIN->ui32HeaderSize,
+ psPDumpImageDescriptorIN->ui32PDumpFlags);
+
+PDumpImageDescriptor_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPDumpImageDescriptorOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to the heap; the in-place case points into
+  * the caller-owned bridge input buffer. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_COMMENT_SIZE <= IMG_UINT32_MAX,
+ "PVRSRV_PDUMP_MAX_COMMENT_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Server-side dispatch handler for PVRSRVPDumpComment.
+ * Copies the bounded comment string (max PVRSRV_PDUMP_MAX_COMMENT_SIZE)
+ * from user space, forces NUL-termination, and passes it to
+ * PDumpCommentKM. Always returns 0; status travels back in the OUT
+ * structure's eError field. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpCommentIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpCommentOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *) IMG_OFFSET_ADDR(psPVRSRVPDumpCommentIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *) IMG_OFFSET_ADDR(psPVRSRVPDumpCommentOUT_UI8,
+ 0);
+
+ IMG_CHAR *uiCommentInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit arithmetic so the size cannot wrap before the range check. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psPVRSRVPDumpCommentIN->ui32CommentSize > PVRSRV_PDUMP_MAX_COMMENT_SIZE))
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PVRSRVPDumpComment_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PVRSRVPDumpComment_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPVRSRVPDumpCommentIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation, freed on exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PVRSRVPDumpComment_exit;
+ }
+ }
+ }
+
+ if (psPVRSRVPDumpCommentIN->ui32CommentSize != 0)
+ {
+ uiCommentInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiCommentInt, (const void __user *)psPVRSRVPDumpCommentIN->puiComment,
+ psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PVRSRVPDumpComment_exit;
+ }
+ /* Force NUL-termination of the user-supplied comment. */
+ ((IMG_CHAR *)
+ uiCommentInt)[(psPVRSRVPDumpCommentIN->ui32CommentSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psPVRSRVPDumpCommentOUT->eError =
+ PDumpCommentKM(psConnection, OSGetDevNode(psConnection),
+ psPVRSRVPDumpCommentIN->ui32CommentSize,
+ uiCommentInt, psPVRSRVPDumpCommentIN->ui32Flags);
+
+PVRSRVPDumpComment_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPVRSRVPDumpCommentOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to the heap allocation. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Server-side dispatch handler for PVRSRVPDumpSetFrame.
+ * Fixed-size IN/OUT structures only, so no variable-length payload
+ * handling is needed; simply forwards the frame number to
+ * PDumpSetFrameKM. Always returns 0; status goes in OUT->eError. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpSetFrameIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpSetFrameOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpSetFrameOUT_UI8,
+ 0);
+
+ psPVRSRVPDumpSetFrameOUT->eError =
+ PDumpSetFrameKM(psConnection, OSGetDevNode(psConnection),
+ psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+ return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+ "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Server-side dispatch handler for PDumpDataDescriptor.
+ * Same shape as PVRSRVBridgePDumpImageDescriptor but with only one
+ * variable-length payload (the filename): copy from user space, force
+ * NUL-termination, look up the devmem context handle and call
+ * DevmemIntPDumpDataDescriptor. Always returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgePDumpDataDescriptor(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPDumpDataDescriptorIN_UI8,
+ IMG_UINT8 * psPDumpDataDescriptorOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorIN =
+ (PVRSRV_BRIDGE_IN_PDUMPDATADESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpDataDescriptorIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *psPDumpDataDescriptorOUT =
+ (PVRSRV_BRIDGE_OUT_PDUMPDATADESCRIPTOR *) IMG_OFFSET_ADDR(psPDumpDataDescriptorOUT_UI8,
+ 0);
+
+ IMG_HANDLE hDevmemCtx = psPDumpDataDescriptorIN->hDevmemCtx;
+ DEVMEMINT_CTX *psDevmemCtxInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit arithmetic so the size cannot wrap before the range check. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psPDumpDataDescriptorIN->ui32StringSize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+ {
+ psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PDumpDataDescriptor_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PDumpDataDescriptor_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPDumpDataDescriptorIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPDumpDataDescriptorIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation, freed on exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PDumpDataDescriptor_exit;
+ }
+ }
+ }
+
+ if (psPDumpDataDescriptorIN->ui32StringSize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFileNameInt, (const void __user *)psPDumpDataDescriptorIN->puiFileName,
+ psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPDumpDataDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PDumpDataDescriptor_exit;
+ }
+ /* Force NUL-termination of the user-supplied filename. */
+ ((IMG_CHAR *)
+ uiFileNameInt)[(psPDumpDataDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPDumpDataDescriptorOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemCtxInt,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ if (unlikely(psPDumpDataDescriptorOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PDumpDataDescriptor_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPDumpDataDescriptorOUT->eError =
+ DevmemIntPDumpDataDescriptor(psConnection, OSGetDevNode(psConnection),
+ psDevmemCtxInt,
+ psPDumpDataDescriptorIN->ui32StringSize,
+ uiFileNameInt,
+ psPDumpDataDescriptorIN->sDataDevAddr,
+ psPDumpDataDescriptorIN->ui32DataSize,
+ psPDumpDataDescriptorIN->ui32HeaderType,
+ psPDumpDataDescriptorIN->ui32ElementType,
+ psPDumpDataDescriptorIN->ui32ElementCount,
+ psPDumpDataDescriptorIN->ui32PDumpFlags);
+
+PDumpDataDescriptor_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPDumpDataDescriptorOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we fell back to the heap allocation. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitPDUMPBridge(void);
+void DeinitPDUMPBridge(void);
+
+/*
+ * Register all PDUMP functions with services
+ */
+/* Registers the four PDUMP bridge handlers in the services dispatch
+ * table. Always reports PVRSRV_OK. */
+PVRSRV_ERROR InitPDUMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR,
+ PVRSRVBridgePDumpImageDescriptor, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT,
+ PVRSRVBridgePVRSRVPDumpComment, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME,
+ PVRSRVBridgePVRSRVPDumpSetFrame, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR,
+ PVRSRVBridgePDumpDataDescriptor, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdump functions with services
+ */
+/* Removes the four PDUMP bridge handlers from the services dispatch
+ * table; mirror of InitPDUMPBridge. */
+void DeinitPDUMPBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPDATADESCRIPTOR);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPCTRL_BRIDGE_H
+#define CLIENT_PDUMPCTRL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpctrl_bridge.h"
+
+/* Direct-call client entry points for the pdumpctrl bridge. hBridge is the
+ * opaque bridge handle; the device-scoped calls cast it to a device node in
+ * the implementation — NOTE(review): confirm callers always pass a device
+ * node handle for those entries. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge, IMG_UINT64 * pui64State);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge, IMG_UINT32 * pui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL * pbpbIsLastCaptureFrame);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge);
+
+#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for pdumpctrl
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpctrl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "pdump_km.h"
+
+/* Return the current PDump state via pui64State. hBridge is unused here
+ * because PDumpGetStateKM takes no device/connection argument. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge, IMG_UINT64 * pui64State)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError = PDumpGetStateKM(pui64State);
+
+ return eError;
+}
+
+/* Query the current PDump frame number for the device. In this direct
+ * (server-context) path the connection is NULL and hBridge is cast to the
+ * device node. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge, IMG_UINT32 * pui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PDumpGetFrameKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), pui32Frame);
+
+ return eError;
+}
+
+/* Set the default PDump capture parameters (mode, start/end frame, interval,
+ * parameter-file size cap and auto-termination timeout) for the device.
+ * Connection is NULL in this direct path; hBridge is cast to the device
+ * node. All arguments are forwarded unchanged to the KM implementation. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout)
+{
+ PVRSRV_ERROR eError;
+
+ eError =
+ PDumpSetDefaultCaptureParamsKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32Mode,
+ ui32Start,
+ ui32End,
+ ui32Interval, ui32MaxParamFileSize, ui32AutoTermTimeout);
+
+ return eError;
+}
+
+/* Report whether the current frame is the last capture frame via
+ * pbpbIsLastCaptureFrame. hBridge is unused: the KM query is global. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL * pbpbIsLastCaptureFrame)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError = PDumpIsLastCaptureFrameKM(pbpbIsLastCaptureFrame);
+
+ return eError;
+}
+
+/* Force the PDump capture on this device to stop. Connection is NULL in the
+ * direct path; hBridge is cast to the device node. */
+IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVPDumpForceCaptureStop(IMG_HANDLE hBridge)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PDumpForceCaptureStopKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge));
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/* Dispatch-table command indices for the PDUMPCTRL bridge module. The range
+ * is contiguous from _CMD_FIRST to _CMD_LAST; _CMD_LAST must track the
+ * highest command added here. */
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+4)
+
+/*******************************************
+ PVRSRVPDumpGetState
+ *******************************************/
+
+/* IN/OUT marshalling structures for each PDUMPCTRL command. All structures
+ * are __packed so the layout matches across the user/kernel boundary;
+ * commands with no input carry a single ui32EmptyStructPlaceholder so the
+ * IN structure is never empty. Every OUT structure carries eError with the
+ * command's PVRSRV status. */
+/* Bridge in structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE;
+
+/* Bridge out structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE_TAG
+{
+ IMG_UINT64 ui64State;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE;
+
+/*******************************************
+ PVRSRVPDumpGetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Frame;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+/*******************************************
+ PVRSRVPDumpSetDefaultCaptureParams
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ IMG_UINT32 ui32AutoTermTimeout;
+ IMG_UINT32 ui32End;
+ IMG_UINT32 ui32Interval;
+ IMG_UINT32 ui32MaxParamFileSize;
+ IMG_UINT32 ui32Mode;
+ IMG_UINT32 ui32Start;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/*******************************************
+ PVRSRVPDumpIsLastCaptureFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bpbIsLastCaptureFrame;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/*******************************************
+ PVRSRVPDumpForceCaptureStop
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpForceCaptureStop */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP;
+
+/* Bridge out structure for PVRSRVPDumpForceCaptureStop */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP;
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Dispatch handler for PVRSRVPDumpGetState. The IN buffer is unused; the
+ * PDump state is written into the OUT structure and the PVRSRV status is
+ * carried in psPVRSRVPDumpGetStateOUT->eError (the IMG_INT return is
+ * always 0, per bridge convention). */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetState(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpGetStateIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpGetStateOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetStateOUT_UI8,
+ 0);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetStateIN);
+
+ psPVRSRVPDumpGetStateOUT->eError = PDumpGetStateKM(&psPVRSRVPDumpGetStateOUT->ui64State);
+
+ return 0;
+}
+
+/* Dispatch handler for PVRSRVPDumpGetFrame. The IN buffer is unused; the
+ * device node is derived from the connection and the frame number is
+ * returned in the OUT structure. Status goes in ->eError; return is 0. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpGetFrameIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpGetFrameOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *) IMG_OFFSET_ADDR(psPVRSRVPDumpGetFrameOUT_UI8,
+ 0);
+
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+ psPVRSRVPDumpGetFrameOUT->eError =
+ PDumpGetFrameKM(psConnection, OSGetDevNode(psConnection),
+ &psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+ return 0;
+}
+
+/* Dispatch handler for PVRSRVPDumpSetDefaultCaptureParams. Unpacks the six
+ * capture parameters from the IN structure and forwards them to the KM call
+ * with the connection's device node. Status goes in ->eError; return is 0.
+ * Range validation of the parameters is presumably done inside
+ * PDumpSetDefaultCaptureParamsKM — not visible here. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 *
+ psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8,
+ IMG_UINT8 *
+ psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN
+ =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS
+ *psPVRSRVPDumpSetDefaultCaptureParamsOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpSetDefaultCaptureParamsOUT_UI8, 0);
+
+ psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+ PDumpSetDefaultCaptureParamsKM(psConnection, OSGetDevNode(psConnection),
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->
+ ui32MaxParamFileSize,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->
+ ui32AutoTermTimeout);
+
+ return 0;
+}
+
+/* Dispatch handler for PVRSRVPDumpIsLastCaptureFrame. IN buffer and
+ * connection are unused; the boolean result is written into the OUT
+ * structure. Status goes in ->eError; return is 0. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpIsLastCaptureFrameIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpIsLastCaptureFrameOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpIsLastCaptureFrameOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+ psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+ PDumpIsLastCaptureFrameKM(&psPVRSRVPDumpIsLastCaptureFrameOUT->bpbIsLastCaptureFrame);
+
+ return 0;
+}
+
+/* Dispatch handler for PVRSRVPDumpForceCaptureStop. The IN buffer is
+ * unused; forces the capture stop on the connection's device node. Status
+ * goes in ->eError; return is 0. */
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpForceCaptureStop(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPVRSRVPDumpForceCaptureStopIN_UI8,
+ IMG_UINT8 * psPVRSRVPDumpForceCaptureStopOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *psPVRSRVPDumpForceCaptureStopIN =
+ (PVRSRV_BRIDGE_IN_PVRSRVPDUMPFORCECAPTURESTOP *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *psPVRSRVPDumpForceCaptureStopOUT =
+ (PVRSRV_BRIDGE_OUT_PVRSRVPDUMPFORCECAPTURESTOP *)
+ IMG_OFFSET_ADDR(psPVRSRVPDumpForceCaptureStopOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpForceCaptureStopIN);
+
+ psPVRSRVPDumpForceCaptureStopOUT->eError =
+ PDumpForceCaptureStopKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+/* Single lock shared by every PDUMPCTRL dispatch entry: each
+ * SetDispatchTableEntry below registers it, serialising all PDUMPCTRL
+ * bridge calls. Created in Init, destroyed in Deinit. */
+static POS_LOCK pPDUMPCTRLBridgeLock;
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+void DeinitPDUMPCTRLBridge(void);
+
+/*
+ * Register all PDUMPCTRL functions with services.
+ * Creates the module bridge lock first (returning on failure), then
+ * registers each handler in the dispatch table under that lock.
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void)
+{
+ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE,
+ PVRSRVBridgePVRSRVPDumpGetState, pPDUMPCTRLBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME,
+ PVRSRVBridgePVRSRVPDumpGetFrame, pPDUMPCTRLBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS,
+ PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams, pPDUMPCTRLBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME,
+ PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame, pPDUMPCTRLBridgeLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP,
+ PVRSRVBridgePVRSRVPDumpForceCaptureStop, pPDUMPCTRLBridgeLock);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdumpctrl functions with services.
+ * The dispatch-table entries are removed FIRST so no bridge call can be
+ * dispatched through an entry that still references the module lock; the
+ * lock is destroyed only after every entry is gone. (The previous order —
+ * OSLockDestroy before UnsetDispatchTableEntry — left a window in which a
+ * registered entry pointed at a destroyed lock.)
+ */
+void DeinitPDUMPCTRLBridge(void)
+{
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL,
+ PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPFORCECAPTURESTOP);
+
+ OSLockDestroy(pPDUMPCTRLBridgeLock);
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpmm_bridge.h"
+
+/* Direct-call client entry points for the pdumpmm bridge. hPMR is an opaque
+ * handle cast to a PMR in the implementation; the PMR entries ignore
+ * hBridge, while DevmemIntPDumpSaveToFileVirtual casts hBridge to the
+ * device node. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags, IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR * puiFileName,
+ IMG_UINT32 ui32uiFileOffset);
+
+/* Returns the PDump symbolic address of an offset within a PMR; outputs the
+ * memspace/symbolic name strings plus the adjusted offset and next symbol
+ * boundary. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR * puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR * puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T * puiNewOffset,
+ IMG_DEVMEM_OFFSET_T * puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCheck32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR * puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for pdumpmm
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pdump_physmem.h"
+
+/* PDump: emit a LoadMem for [uiOffset, uiOffset+uiSize) of the PMR.
+ * hBridge is unused; hPMR is cast straight to a PMR pointer. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags, IMG_BOOL bbZero)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRPDumpLoadMem(psPMRInt, uiOffset, uiSize, ui32PDumpFlags, bbZero);
+
+ return eError;
+}
+
+/* PDump: emit a 32-bit value load at uiOffset within the PMR.
+ * hBridge is unused; hPMR is cast straight to a PMR pointer. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRPDumpLoadMemValue32(psPMRInt, uiOffset, ui32Value, ui32PDumpFlags);
+
+ return eError;
+}
+
+/* PDump: emit a 64-bit value load at uiOffset within the PMR.
+ * hBridge is unused; hPMR is cast straight to a PMR pointer. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRPDumpLoadMemValue64(psPMRInt, uiOffset, ui64Value, ui32PDumpFlags);
+
+ return eError;
+}
+
+/* PDump: emit a save-to-file of uiSize bytes of the PMR starting at
+ * uiOffset into puiFileName at ui32uiFileOffset. hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR * puiFileName,
+ IMG_UINT32 ui32uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpSaveToFile(psPMRInt,
+ uiOffset, uiSize, ui32ArraySize, puiFileName, ui32uiFileOffset);
+
+ return eError;
+}
+
+/* Look up the PDump symbolic address for uiOffset within the PMR. The
+ * memspace and symbolic-name strings are written into the caller-supplied
+ * buffers (sizes given by the *Len arguments); the adjusted offset and the
+ * next symbol boundary come back via puiNewOffset / puiNextSymName.
+ * hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR * puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR * puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T * puiNewOffset,
+ IMG_DEVMEM_OFFSET_T * puiNextSymName)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMR_PDumpSymbolicAddr(psPMRInt,
+ uiOffset,
+ ui32MemspaceNameLen,
+ puiMemspaceName,
+ ui32SymbolicAddrLen,
+ puiSymbolicAddr, puiNewOffset, puiNextSymName);
+
+ return eError;
+}
+
+/* PDump: emit a 32-bit poll (value/mask with eOperator) at uiOffset within
+ * the PMR. hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRPDumpPol32(psPMRInt, uiOffset, ui32Value, ui32Mask, eOperator, ui32PDumpFlags);
+
+ return eError;
+}
+
+/* PDump: emit a 32-bit check (value/mask with eOperator) at uiOffset within
+ * the PMR. hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCheck32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpCheck32(psPMRInt, uiOffset, ui32Value, ui32Mask, eOperator, ui32PDumpFlags);
+
+ return eError;
+}
+
+/* PDump: emit a circular-buffer poll (CBP) on the PMR using the given
+ * read/write offsets, packet size and buffer size. hBridge is unused. */
+IMG_INTERNAL PVRSRV_ERROR BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError = PMRPDumpCBP(psPMRInt, uiReadOffset, uiWriteOffset, uiPacketSize, uiBufferSize);
+
+ return eError;
+}
+
+/* PDump: save uiSize bytes at device-virtual address sAddress (within the
+ * given devmem server context) to puiFileName at ui32FileOffset. Unlike the
+ * PMR entries above, hBridge is used here: it is cast to the device node,
+ * with a NULL connection for this direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR * puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemServerContextInt;
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError =
+ DevmemIntPDumpSaveToFileVirtual(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ psDevmemServerContextInt,
+ sAddress,
+ uiSize,
+ ui32ArraySize,
+ puiFileName, ui32FileOffset, ui32PDumpFlags);
+
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+8)
+
+/*******************************************
+ PMRPDumpLoadMem
+ *******************************************/
+
+/* All bridge IN/OUT structures below are declared __packed so the user and
+ * kernel sides agree on layout regardless of compiler padding. Do not
+ * reorder or resize members: the layout is ABI between client and server.
+ * Pointer members hold user-mode addresses; the server only accesses them
+ * through OSCopyFromUser/OSCopyToUser.
+ */
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_BOOL bbZero;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+/*******************************************
+ PMRPDumpLoadMemValue32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+/*******************************************
+ PMRPDumpLoadMemValue64
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ IMG_UINT64 ui64Value;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+/*******************************************
+ PMRPDumpSaveToFile
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hPMR;
+ /* User-mode buffer of ui32ArraySize IMG_CHARs; copied in by the server. */
+ const IMG_CHAR *puiFileName;
+ IMG_UINT32 ui32ArraySize;
+ IMG_UINT32 ui32uiFileOffset;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+/*******************************************
+ PMRPDumpSymbolicAddr
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_HANDLE hPMR;
+ /* User-mode destination buffers; lengths are given by the fields below. */
+ IMG_CHAR *puiMemspaceName;
+ IMG_CHAR *puiSymbolicAddr;
+ IMG_UINT32 ui32MemspaceNameLen;
+ IMG_UINT32 ui32SymbolicAddrLen;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiNewOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ /* Echoes of the IN pointers; the server fills them via OSCopyToUser. */
+ IMG_CHAR *puiMemspaceName;
+ IMG_CHAR *puiSymbolicAddr;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
+
+/*******************************************
+ PMRPDumpPol32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_HANDLE hPMR;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32Mask;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+/*******************************************
+ PMRPDumpCheck32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCheck32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32_TAG
+{
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_HANDLE hPMR;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32Mask;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32;
+
+/* Bridge out structure for PMRPDumpCheck32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32;
+
+/*******************************************
+ PMRPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+/*******************************************
+ DevmemIntPDumpSaveToFileVirtual
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_HANDLE hDevmemServerContext;
+ const IMG_CHAR *puiFileName;
+ IMG_UINT32 ui32ArraySize;
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+#include "pdump_physmem.h"
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/*
+ * Server dispatcher for PMRPDumpLoadMem.
+ * Resolves the client's PMR handle under the connection handle-base lock
+ * (the IMG_TRUE argument requests a reference on the looked-up object),
+ * invokes PMRPDumpLoadMem, then drops the reference on exit. The dispatcher
+ * always returns 0; the per-call status is returned to user mode in
+ * psPMRPDumpLoadMemOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpLoadMemIN_UI8,
+ IMG_UINT8 * psPMRPDumpLoadMemOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *) IMG_OFFSET_ADDR(psPMRPDumpLoadMemIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *) IMG_OFFSET_ADDR(psPMRPDumpLoadMemOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpLoadMem_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpLoadMemOUT->eError =
+ PMRPDumpLoadMem(psPMRInt,
+ psPMRPDumpLoadMemIN->uiOffset,
+ psPMRPDumpLoadMemIN->uiSize,
+ psPMRPDumpLoadMemIN->ui32PDumpFlags, psPMRPDumpLoadMemIN->bbZero);
+
+PMRPDumpLoadMem_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Server dispatcher for PMRPDumpLoadMemValue32.
+ * Same shape as PMRPDumpLoadMem: handle lookup (with reference) under the
+ * handle-base lock, call into PMRPDumpLoadMemValue32, release the reference.
+ * Always returns 0; the operation status is carried in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpLoadMemValue32IN_UI8,
+ IMG_UINT8 * psPMRPDumpLoadMemValue32OUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *)
+ IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *)
+ IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue32OUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpLoadMemValue32_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PMRPDumpLoadMemValue32(psPMRInt,
+ psPMRPDumpLoadMemValue32IN->uiOffset,
+ psPMRPDumpLoadMemValue32IN->ui32Value,
+ psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Server dispatcher for PMRPDumpLoadMemValue64.
+ * Identical structure to the 32-bit variant, but forwards a 64-bit value.
+ * Always returns 0; the operation status is carried in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpLoadMemValue64IN_UI8,
+ IMG_UINT8 * psPMRPDumpLoadMemValue64OUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *)
+ IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *)
+ IMG_OFFSET_ADDR(psPMRPDumpLoadMemValue64OUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpLoadMemValue64_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PMRPDumpLoadMemValue64(psPMRInt,
+ psPMRPDumpLoadMemValue64IN->uiOffset,
+ psPMRPDumpLoadMemValue64IN->ui64Value,
+ psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue64_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+ "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Server dispatcher for PMRPDumpSaveToFile.
+ * Validates the client-supplied filename length against
+ * PVRSRV_PDUMP_MAX_FILENAME_SIZE, stages the filename either in the unused
+ * tail of the bridge input buffer (when it fits) or in a temporary
+ * allocation, copies it from user space and forces NUL termination, then
+ * looks up the PMR handle (taking a reference) and calls PMRPDumpSaveToFile.
+ * All exits release the handle reference and free the temporary buffer if
+ * one was allocated. Always returns 0; status travels in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpSaveToFileIN_UI8,
+ IMG_UINT8 * psPMRPDumpSaveToFileOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *) IMG_OFFSET_ADDR(psPMRPDumpSaveToFileIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *) IMG_OFFSET_ADDR(psPMRPDumpSaveToFileOUT_UI8,
+ 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ /* Total copy-in size is computed in 64 bits to avoid overflow before the
+  * range check below. */
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psPMRPDumpSaveToFileIN->ui32ArraySize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PMRPDumpSaveToFile_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PMRPDumpSaveToFile_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPMRPDumpSaveToFileIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer tail too small: fall back to a heap allocation. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+ }
+
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFileNameInt, (const void __user *)psPMRPDumpSaveToFileIN->puiFileName,
+ psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSaveToFile_exit;
+ }
+ /* Force NUL termination so the server never reads past the buffer,
+  * even if user space supplied an unterminated string. */
+ ((IMG_CHAR *)
+ uiFileNameInt)[(psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpSaveToFileOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpSaveToFile_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpSaveToFileOUT->eError =
+ PMRPDumpSaveToFile(psPMRInt,
+ psPMRPDumpSaveToFileIN->uiOffset,
+ psPMRPDumpSaveToFileIN->uiSize,
+ psPMRPDumpSaveToFileIN->ui32ArraySize,
+ uiFileNameInt, psPMRPDumpSaveToFileIN->ui32uiFileOffset);
+
+PMRPDumpSaveToFile_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPMRPDumpSaveToFileOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when we fell back to the heap allocation above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static_assert(PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH <= IMG_UINT32_MAX,
+ "PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH <= IMG_UINT32_MAX,
+ "PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Server dispatcher for PMR_PDumpSymbolicAddr.
+ * Validates the two client-supplied string lengths, stages kernel buffers
+ * for the memspace name and symbolic address (reusing the tail of the bridge
+ * input buffer when it fits, otherwise a temporary allocation), looks up the
+ * PMR handle (taking a reference), calls PMR_PDumpSymbolicAddr, and copies
+ * the resulting strings back to the user buffers echoed via the OUT
+ * structure. All exits release the handle reference and free any temporary
+ * allocation. Always returns 0; status travels in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpSymbolicAddrIN_UI8,
+ IMG_UINT8 * psPMRPDumpSymbolicAddrOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *) IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *)
+ IMG_OFFSET_ADDR(psPMRPDumpSymbolicAddrOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR;
+ PMR *psPMRInt = NULL;
+ IMG_CHAR *puiMemspaceNameInt = NULL;
+ IMG_CHAR *puiSymbolicAddrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ /* Total staging size is computed in 64 bits to avoid overflow before the
+  * range check below. */
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) + 0;
+
+ if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen > PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen > PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+
+ /* Echo the user-mode destination pointers so the copy-out below (and the
+  * client) can find them in the OUT structure. */
+ psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+ psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPMRPDumpSymbolicAddrIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer tail too small: fall back to a heap allocation. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+ }
+
+ /* Carve the two output strings out of the staging buffer. */
+ if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+ {
+ puiMemspaceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR);
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+ {
+ puiSymbolicAddrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR);
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PMR_PDumpSymbolicAddr(psPMRInt,
+ psPMRPDumpSymbolicAddrIN->uiOffset,
+ psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+ puiMemspaceNameInt,
+ psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+ puiSymbolicAddrInt,
+ &psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+ &psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+ /* Exit early if bridged call fails */
+ if (unlikely(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK))
+ {
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((puiMemspaceNameInt) &&
+ ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psPMRPDumpSymbolicAddrOUT->puiMemspaceName,
+ puiMemspaceNameInt,
+ (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) !=
+ PVRSRV_OK))
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((puiSymbolicAddrInt) &&
+ ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr,
+ puiSymbolicAddrInt,
+ (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) !=
+ PVRSRV_OK))
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+PMRPDumpSymbolicAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psPMRPDumpSymbolicAddrOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when we fell back to the heap allocation above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Server dispatcher for PMRPDumpPol32.
+ * Looks up the PMR handle under the handle-base lock (taking a reference),
+ * calls PMRPDumpPol32, and drops the reference on exit. Always returns 0;
+ * the operation status is carried in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpPol32IN_UI8,
+ IMG_UINT8 * psPMRPDumpPol32OUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *) IMG_OFFSET_ADDR(psPMRPDumpPol32IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *) IMG_OFFSET_ADDR(psPMRPDumpPol32OUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpPol32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpPol32OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpPol32_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpPol32OUT->eError =
+ PMRPDumpPol32(psPMRInt,
+ psPMRPDumpPol32IN->uiOffset,
+ psPMRPDumpPol32IN->ui32Value,
+ psPMRPDumpPol32IN->ui32Mask,
+ psPMRPDumpPol32IN->eOperator, psPMRPDumpPol32IN->ui32PDumpFlags);
+
+PMRPDumpPol32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Server dispatcher for PMRPDumpCheck32.
+ * Same shape as the Pol32 dispatcher: handle lookup (with reference) under
+ * the handle-base lock, call into PMRPDumpCheck32, release the reference.
+ * Always returns 0; the operation status is carried in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpCheck32(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpCheck32IN_UI8,
+ IMG_UINT8 * psPMRPDumpCheck32OUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *psPMRPDumpCheck32IN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPCHECK32 *) IMG_OFFSET_ADDR(psPMRPDumpCheck32IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *psPMRPDumpCheck32OUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPCHECK32 *) IMG_OFFSET_ADDR(psPMRPDumpCheck32OUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpCheck32IN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpCheck32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpCheck32OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpCheck32_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psPMRPDumpCheck32OUT->eError =
+ PMRPDumpCheck32(psPMRInt,
+ psPMRPDumpCheck32IN->uiOffset,
+ psPMRPDumpCheck32IN->ui32Value,
+ psPMRPDumpCheck32IN->ui32Mask,
+ psPMRPDumpCheck32IN->eOperator, psPMRPDumpCheck32IN->ui32PDumpFlags);
+
+PMRPDumpCheck32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Bridge dispatcher for PMRPDumpCBP.
+ *
+ * Looks up the client-supplied PMR handle and forwards a PDump circular
+ * buffer poll (CBP) request to PMRPDumpCBP() with the read/write offsets
+ * and packet/buffer sizes from the IN structure. The PVRSRV_ERROR result
+ * is passed back via psPMRPDumpCBPOUT->eError; the dispatcher itself
+ * always returns 0.
+ */
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psPMRPDumpCBPIN_UI8,
+ IMG_UINT8 * psPMRPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN =
+ (PVRSRV_BRIDGE_IN_PMRPDUMPCBP *) IMG_OFFSET_ADDR(psPMRPDumpCBPIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT =
+ (PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *) IMG_OFFSET_ADDR(psPMRPDumpCBPOUT_UI8, 0);
+
+ IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR;
+ PMR *psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psPMRPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psPMRPDumpCBPOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto PMRPDumpCBP_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Called outside the handle lock; the lookup reference keeps
+ * psPMRInt valid until released in the exit path. */
+ psPMRPDumpCBPOUT->eError =
+ PMRPDumpCBP(psPMRInt,
+ psPMRPDumpCBPIN->uiReadOffset,
+ psPMRPDumpCBPIN->uiWriteOffset,
+ psPMRPDumpCBPIN->uiPacketSize, psPMRPDumpCBPIN->uiBufferSize);
+
+PMRPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+static_assert(PVRSRV_PDUMP_MAX_FILENAME_SIZE <= IMG_UINT32_MAX,
+ "PVRSRV_PDUMP_MAX_FILENAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge dispatcher for DevmemIntPDumpSaveToFileVirtual.
+ *
+ * Validates and copies the user-supplied filename array into kernel space
+ * (reusing the tail of the fixed-size bridge input buffer when it is large
+ * enough, otherwise heap-allocating), looks up the device-memory context
+ * handle, and forwards the request to DevmemIntPDumpSaveToFileVirtual().
+ * The PVRSRV_ERROR result is passed back via
+ * psDevmemIntPDumpSaveToFileVirtualOUT->eError; the dispatcher itself
+ * always returns 0.
+ */
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDevmemIntPDumpSaveToFileVirtualIN_UI8,
+ IMG_UINT8 * psDevmemIntPDumpSaveToFileVirtualOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN =
+ (PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *)
+ IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT =
+ (PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *)
+ IMG_OFFSET_ADDR(psDevmemIntPDumpSaveToFileVirtualOUT_UI8, 0);
+
+ IMG_HANDLE hDevmemServerContext = psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext;
+ DEVMEMINT_CTX *psDevmemServerContextInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Size computed in 64 bits so the multiply cannot wrap before the
+ * IMG_UINT32_MAX range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+ 0;
+
+ /* Reject untrusted array sizes above the PDump filename limit. */
+ if (unlikely
+ (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize > PVRSRV_PDUMP_MAX_FILENAME_SIZE))
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psDevmemIntPDumpSaveToFileVirtualIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer too small: fall back to a heap allocation,
+ * freed at the end of the function. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+ }
+
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFileNameInt,
+ (const void __user *)psDevmemIntPDumpSaveToFileVirtualIN->puiFileName,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) !=
+ PVRSRV_OK)
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ /* Force NUL-termination: the last byte of the copied-in
+ * filename is overwritten unconditionally. */
+ ((IMG_CHAR *)
+ uiFileNameInt)[(psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize *
+ sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psDevmemServerContextInt,
+ hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
+ if (unlikely(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ DevmemIntPDumpSaveToFileVirtual(psConnection, OSGetDevNode(psConnection),
+ psDevmemServerContextInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+ psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+ uiFileNameInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psDevmemServerContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psDevmemIntPDumpSaveToFileVirtualOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the heap-allocation fallback path was taken. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+void DeinitPDUMPMMBridge(void);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+/*
+ * Register every PDUMPMM bridge handler with the services dispatch table.
+ * The set registered here must mirror the list removed by
+ * DeinitPDUMPMMBridge(). NOTE(review): the calls are kept as individual
+ * statements rather than a table loop — SetDispatchTableEntry is presumably
+ * a macro that records the handler name; verify before restructuring.
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM,
+ PVRSRVBridgePMRPDumpLoadMem, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32,
+ PVRSRVBridgePMRPDumpLoadMemValue32, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64,
+ PVRSRVBridgePMRPDumpLoadMemValue64, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE,
+ PVRSRVBridgePMRPDumpSaveToFile, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR,
+ PVRSRVBridgePMRPDumpSymbolicAddr, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32,
+ PVRSRVBridgePMRPDumpPol32, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32,
+ PVRSRVBridgePMRPDumpCheck32, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP,
+ PVRSRVBridgePMRPDumpCBP, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+ PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL,
+ PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pdumpmm functions with services
+ */
+/*
+ * Unregister every PDUMPMM bridge handler registered by
+ * InitPDUMPMMBridge(); entries are removed by function ID in the same
+ * order they were added.
+ */
+void DeinitPDUMPMMBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+ PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+ PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCHECK32);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM,
+ PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+/* Open a transport-layer stream by name; on success returns a stream
+ * descriptor handle in *phSD and the stream's PMR handle in *phTLPMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge,
+ const IMG_CHAR * puiName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR);
+
+/* Close a stream previously opened with BridgeTLOpenStream. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD);
+
+/* Acquire readable data from a stream; returns the read offset and length. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 * pui32ReadOffset,
+ IMG_UINT32 * pui32ReadLen);
+
+/* Release data previously acquired with BridgeTLAcquireData. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen);
+
+/* Enumerate streams whose names match puiNamePattern into puiStreams
+ * (ui32Size bytes); the number found is returned in *pui32NumFound. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+ const IMG_CHAR * puiNamePattern,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR * puiStreams,
+ IMG_UINT32 * pui32NumFound);
+
+/* Reserve write space in a stream; returns the buffer offset to write at
+ * and the space available. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 * pui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 * pui32Available);
+
+/* Commit ui32ReqSize bytes previously reserved with BridgeTLReserveStream. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize);
+
+/* Write ui32Size bytes from pui8Data into a stream. */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size, IMG_BYTE * pui8Data);
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for pvrtl
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+/*
+ * Direct-bridge open: calls the server entry point in-process and returns
+ * the resulting stream descriptor and PMR pointers as opaque handles.
+ * Both out-handles are written unconditionally (NULL on failure).
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge,
+					     const IMG_CHAR * puiName,
+					     IMG_UINT32 ui32Mode,
+					     IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR)
+{
+	TL_STREAM_DESC *psStreamDesc = NULL;
+	PMR *psStreamPMR = NULL;
+	PVRSRV_ERROR eResult;
+
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	eResult = TLServerOpenStreamKM(puiName, ui32Mode, &psStreamDesc, &psStreamPMR);
+
+	/* Hand the raw server pointers back as handles, even on error. */
+	*phSD = psStreamDesc;
+	*phTLPMR = psStreamPMR;
+
+	return eResult;
+}
+
+/*
+ * Direct-bridge close: the handle is the stream descriptor pointer itself,
+ * so simply forward it to the server close entry point.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD)
+{
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerCloseStreamKM((TL_STREAM_DESC *) hSD);
+}
+
+/*
+ * Direct-bridge acquire: forwards straight to the server, returning the
+ * read offset and length through the caller's pointers.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge,
+					      IMG_HANDLE hSD,
+					      IMG_UINT32 * pui32ReadOffset,
+					      IMG_UINT32 * pui32ReadLen)
+{
+	TL_STREAM_DESC *psStreamDesc = (TL_STREAM_DESC *) hSD;
+
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerAcquireDataKM(psStreamDesc, pui32ReadOffset, pui32ReadLen);
+}
+
+/*
+ * Direct-bridge release: returns previously acquired data (identified by
+ * offset and length) back to the server.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge,
+					      IMG_HANDLE hSD,
+					      IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen)
+{
+	TL_STREAM_DESC *psStreamDesc = (TL_STREAM_DESC *) hSD;
+
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerReleaseDataKM(psStreamDesc, ui32ReadOffset, ui32ReadLen);
+}
+
+/*
+ * Direct-bridge stream discovery: forwards the pattern and output buffer
+ * straight to the server enumeration entry point.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+						  const IMG_CHAR * puiNamePattern,
+						  IMG_UINT32 ui32Size,
+						  IMG_CHAR * puiStreams, IMG_UINT32 * pui32NumFound)
+{
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerDiscoverStreamsKM(puiNamePattern, ui32Size, puiStreams, pui32NumFound);
+}
+
+/*
+ * Direct-bridge reserve: asks the server for write space in the stream;
+ * the granted buffer offset and available space are returned through the
+ * caller's pointers.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge,
+						IMG_HANDLE hSD,
+						IMG_UINT32 * pui32BufferOffset,
+						IMG_UINT32 ui32Size,
+						IMG_UINT32 ui32SizeMin, IMG_UINT32 * pui32Available)
+{
+	TL_STREAM_DESC *psStreamDesc = (TL_STREAM_DESC *) hSD;
+
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerReserveStreamKM(psStreamDesc, pui32BufferOffset,
+				       ui32Size, ui32SizeMin, pui32Available);
+}
+
+/*
+ * Direct-bridge commit: finalises ui32ReqSize bytes of previously reserved
+ * stream space on the server side.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge,
+					       IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize)
+{
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerCommitStreamKM((TL_STREAM_DESC *) hSD, ui32ReqSize);
+}
+
+/*
+ * Direct-bridge write: pushes ui32Size bytes from pui8Data into the stream
+ * via the server write entry point.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge,
+					    IMG_HANDLE hSD,
+					    IMG_UINT32 ui32Size, IMG_BYTE * pui8Data)
+{
+	TL_STREAM_DESC *psStreamDesc = (TL_STREAM_DESC *) hSD;
+
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	return TLServerWriteDataKM(psStreamDesc, ui32Size, pui8Data);
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+/* Function IDs for the PVRTL bridge group: consecutive offsets from
+ * CMD_FIRST; CMD_LAST must track the highest offset defined here. */
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
+
+/* NOTE(review): every IN/OUT structure below is declared __packed —
+ * presumably so the layout is identical on both sides of the user/kernel
+ * bridge interface; confirm against the bridge marshalling code. */
+
+/*******************************************
+ TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+ const IMG_CHAR *puiName;
+ IMG_UINT32 ui32Mode;
+} __packed PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_HANDLE hTLPMR;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+/*******************************************
+ TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+} __packed PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+/*******************************************
+ TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+ IMG_HANDLE hSD;
+} __packed PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32ReadLen;
+ IMG_UINT32 ui32ReadOffset;
+} __packed PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+/*******************************************
+ TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReadLen;
+ IMG_UINT32 ui32ReadOffset;
+} __packed PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+/*******************************************
+ TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+ const IMG_CHAR *puiNamePattern;
+ IMG_CHAR *puiStreams;
+ IMG_UINT32 ui32Size;
+} __packed PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+ IMG_CHAR *puiStreams;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumFound;
+} __packed PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
+
+/*******************************************
+ TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32SizeMin;
+} __packed PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Available;
+ IMG_UINT32 ui32BufferOffset;
+} __packed PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+/*******************************************
+ TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReqSize;
+} __packed PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+/*******************************************
+ TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_BYTE *pui8Data;
+ IMG_UINT32 ui32Size;
+} __packed PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/*
+ * Handle-release callback registered for TLOpenStream's stream-descriptor
+ * handle: closes the underlying TL stream when the handle is destroyed.
+ */
+static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData)
+{
+	return TLServerCloseStreamKM((TL_STREAM_DESC *) pvData);
+}
+
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psTLOpenStreamIN_UI8,
+ IMG_UINT8 * psTLOpenStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN =
+ (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT =
+ (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0);
+
+ IMG_CHAR *uiNameInt = NULL;
+ TL_STREAM_DESC *psSDInt = NULL;
+ PMR *psTLPMRInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
+
+ psTLOpenStreamOUT->hSD = NULL;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto TLOpenStream_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLOpenStreamIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLOpenStream_exit;
+ }
+ }
+ }
+
+ {
+ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiNameInt, (const void __user *)psTLOpenStreamIN->puiName,
+ PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLOpenStream_exit;
+ }
+ ((IMG_CHAR *) uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psTLOpenStreamOUT->eError =
+ TLServerOpenStreamKM(uiNameInt, psTLOpenStreamIN->ui32Mode, &psSDInt, &psTLPMRInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+ {
+ goto TLOpenStream_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psTLOpenStreamOUT->hSD,
+ (void *)psSDInt,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _TLOpenStreampsSDIntRelease);
+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto TLOpenStream_exit;
+ }
+
+ psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+ &psTLOpenStreamOUT->hTLPMR,
+ (void *)psTLPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ psTLOpenStreamOUT->hSD);
+ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto TLOpenStream_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+TLOpenStream_exit:
+
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ if (psTLOpenStreamOUT->hSD)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Lock over handle creation cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLOpenStreamOUT->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSDInt = NULL;
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ }
+
+ if (psSDInt)
+ {
+ TLServerCloseStreamKM(psSDInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psTLOpenStreamOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psTLCloseStreamIN_UI8,
+ IMG_UINT8 * psTLCloseStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN =
+ (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT =
+ (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psTLCloseStreamOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLCloseStreamIN->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(psTLCloseStreamOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto TLCloseStream_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+TLCloseStream_exit:
+
+ return 0;
+}
+
+/* Bridge entry point for TLAcquireData.
+ *
+ * Looks up the stream descriptor from the client handle, calls
+ * TLServerAcquireDataKM to obtain a read offset/length pair, and returns
+ * both in the OUT structure.  The looked-up handle is always unreferenced
+ * on exit.  Returns 0; call status is in psTLAcquireDataOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+			  IMG_UINT8 * psTLAcquireDataIN_UI8,
+			  IMG_UINT8 * psTLAcquireDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN =
+	    (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT =
+	    (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0);
+
+	IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLAcquireDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLAcquireData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLAcquireDataOUT->eError =
+	    TLServerAcquireDataKM(psSDInt,
+				  &psTLAcquireDataOUT->ui32ReadOffset,
+				  &psTLAcquireDataOUT->ui32ReadLen);
+
+TLAcquireData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Bridge entry point for TLReleaseData.
+ *
+ * Mirror of TLAcquireData: looks up the stream descriptor and passes the
+ * client-supplied read offset/length to TLServerReleaseDataKM to release
+ * previously acquired data.  The handle reference is dropped on exit.
+ * Returns 0; call status is in psTLReleaseDataOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+			  IMG_UINT8 * psTLReleaseDataIN_UI8,
+			  IMG_UINT8 * psTLReleaseDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN =
+	    (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT =
+	    (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0);
+
+	IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLReleaseDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLReleaseData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLReleaseDataOUT->eError =
+	    TLServerReleaseDataKM(psSDInt,
+				  psTLReleaseDataIN->ui32ReadOffset,
+				  psTLReleaseDataIN->ui32ReadLen);
+
+TLReleaseData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Compile-time guards: the array-size arithmetic below is done in 64-bit
+ * and then narrowed to 32-bit, so the bounds themselves must fit. */
+static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX,
+	      "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER <= IMG_UINT32_MAX,
+	      "PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for TLDiscoverStreams.
+ *
+ * Copies a stream-name pattern in from user space (forcibly NUL-terminated),
+ * calls TLServerDiscoverStreamsKM to enumerate matching streams into a
+ * kernel staging buffer, then copies the result back out to the client's
+ * puiStreams buffer.  The staging buffer reuses the tail of the bridge
+ * input buffer when it fits, otherwise it is heap-allocated and freed on
+ * exit.  ui32Size is validated against the discoverable-streams bound
+ * before use.  Returns 0; call status is in psTLDiscoverStreamsOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+			      IMG_UINT8 * psTLDiscoverStreamsIN_UI8,
+			      IMG_UINT8 * psTLDiscoverStreamsOUT_UI8,
+			      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN =
+	    (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT =
+	    (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0);
+
+	IMG_CHAR *uiNamePatternInt = NULL;
+	IMG_CHAR *puiStreamsInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	IMG_UINT32 ui32BufferSize = 0;
+	/* 64-bit accumulation avoids overflow before the IMG_UINT32_MAX check. */
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+	    ((IMG_UINT64) psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0;
+
+	if (psTLDiscoverStreamsIN->ui32Size > PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER)
+	{
+		psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto TLDiscoverStreams_exit;
+	}
+
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams;
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto TLDiscoverStreams_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLDiscoverStreamsIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLDiscoverStreams_exit;
+			}
+		}
+	}
+
+	{
+		uiNamePatternInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiNamePatternInt,
+		     (const void __user *)psTLDiscoverStreamsIN->puiNamePattern,
+		     PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+		/* Defensive: guarantee NUL termination of the untrusted pattern. */
+		((IMG_CHAR *) uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) -
+						1] = '\0';
+	}
+	if (psTLDiscoverStreamsIN->ui32Size != 0)
+	{
+		puiStreamsInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR);
+	}
+
+	psTLDiscoverStreamsOUT->eError =
+	    TLServerDiscoverStreamsKM(uiNamePatternInt,
+				      psTLDiscoverStreamsIN->ui32Size,
+				      puiStreamsInt, &psTLDiscoverStreamsOUT->ui32NumFound);
+	/* Exit early if bridged call fails */
+	if (unlikely(psTLDiscoverStreamsOUT->eError != PVRSRV_OK))
+	{
+		goto TLDiscoverStreams_exit;
+	}
+
+	/* If dest ptr is non-null and we have data to copy */
+	if ((puiStreamsInt) && ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0))
+	{
+		if (unlikely
+		    (OSCopyToUser
+		     (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt,
+		      (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK))
+		{
+			psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLDiscoverStreams_exit;
+		}
+	}
+
+TLDiscoverStreams_exit:
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psTLDiscoverStreamsOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when the staging buffer was heap-allocated above. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Bridge entry point for TLReserveStream.
+ *
+ * Looks up the stream descriptor and asks TLServerReserveStreamKM to
+ * reserve between ui32SizeMin and ui32Size bytes in the stream, returning
+ * the buffer offset of the reservation and the space available.  The
+ * handle reference is dropped on exit.  Returns 0; call status is in
+ * psTLReserveStreamOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+			    IMG_UINT8 * psTLReserveStreamIN_UI8,
+			    IMG_UINT8 * psTLReserveStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN =
+	    (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT =
+	    (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0);
+
+	IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLReserveStreamOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLReserveStream_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLReserveStreamOUT->eError =
+	    TLServerReserveStreamKM(psSDInt,
+				    &psTLReserveStreamOUT->ui32BufferOffset,
+				    psTLReserveStreamIN->ui32Size,
+				    psTLReserveStreamIN->ui32SizeMin,
+				    &psTLReserveStreamOUT->ui32Available);
+
+TLReserveStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Bridge entry point for TLCommitStream.
+ *
+ * Looks up the stream descriptor and commits ui32ReqSize bytes of a prior
+ * reservation via TLServerCommitStreamKM.  The handle reference is dropped
+ * on exit.  Returns 0; call status is in psTLCommitStreamOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+			   IMG_UINT8 * psTLCommitStreamIN_UI8,
+			   IMG_UINT8 * psTLCommitStreamOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN =
+	    (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT =
+	    (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0);
+
+	IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLCommitStreamOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLCommitStream_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLCommitStreamOUT->eError =
+	    TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize);
+
+TLCommitStream_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* Compile-time guard: the packet-size bound must fit the 32-bit size math. */
+static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX,
+	      "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for TLWriteData.
+ *
+ * Copies up to PVRSRVTL_MAX_PACKET_SIZE bytes of client payload into a
+ * kernel staging buffer (tail of the bridge input buffer when it fits,
+ * heap otherwise), looks up the stream descriptor and writes the payload
+ * via TLServerWriteDataKM.  The staging buffer is freed and the handle
+ * unreferenced on all exit paths.  Returns 0; call status is in
+ * psTLWriteDataOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+			IMG_UINT8 * psTLWriteDataIN_UI8,
+			IMG_UINT8 * psTLWriteDataOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN =
+	    (PVRSRV_BRIDGE_IN_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT =
+	    (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0);
+
+	IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+	TL_STREAM_DESC *psSDInt = NULL;
+	IMG_BYTE *ui8DataInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	IMG_UINT32 ui32BufferSize = 0;
+	/* 64-bit accumulation avoids overflow before the IMG_UINT32_MAX check. */
+	IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0;
+
+	if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE))
+	{
+		psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto TLWriteData_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto TLWriteData_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLWriteDataIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto TLWriteData_exit;
+			}
+		}
+	}
+
+	if (psTLWriteDataIN->ui32Size != 0)
+	{
+		ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui8DataInt, (const void __user *)psTLWriteDataIN->pui8Data,
+		     psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK)
+		{
+			psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto TLWriteData_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psTLWriteDataOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psSDInt,
+				       hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE);
+	if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto TLWriteData_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psTLWriteDataOUT->eError =
+	    TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, ui8DataInt);
+
+TLWriteData_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psSDInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psTLWriteDataOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when the staging buffer was heap-allocated above. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+/* Forward declarations: these two are the module's public init/deinit
+ * entry points, called by the bridge framework. */
+PVRSRV_ERROR InitPVRTLBridge(void);
+void DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+/* Registers every PVRTL bridge handler above in the services dispatch
+ * table, one entry per PVRSRV_BRIDGE_PVRTL_* command ID.  Always returns
+ * PVRSRV_OK. */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM,
+			      PVRSRVBridgeTLOpenStream, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM,
+			      PVRSRVBridgeTLCloseStream, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA,
+			      PVRSRVBridgeTLAcquireData, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA,
+			      PVRSRVBridgeTLReleaseData, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS,
+			      PVRSRVBridgeTLDiscoverStreams, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM,
+			      PVRSRVBridgeTLReserveStream, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM,
+			      PVRSRVBridgeTLCommitStream, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA,
+			      PVRSRVBridgeTLWriteData, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all pvrtl functions with services
+ */
+/* Mirror of InitPVRTLBridge: removes every PVRTL entry from the dispatch
+ * table.  Must be kept in sync with the registrations above. */
+void DeinitPVRTLBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+/* Dispatch-table command indices for the rgxcmp bridge.  IDs are offsets
+ * from PVRSRV_BRIDGE_RGXCMP_CMD_FIRST; PVRSRV_BRIDGE_RGXCMP_CMD_LAST must
+ * track the highest offset below. */
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+8)
+
+/* All IN/OUT structures below are __packed: they form the user/kernel
+ * bridge ABI and are shared by client and server, so layout must not
+ * change.  NOTE(review): pointer-typed members appear to carry
+ * client-side addresses marshalled across the bridge (the server copies
+ * through them with OSCopyFromUser/OSCopyToUser elsewhere) -- confirm
+ * against the client stubs before relying on this. */
+
+/*******************************************
+            RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_UINT64 ui64RobustnessAddress;
+	IMG_HANDLE hPrivData;
+	IMG_BYTE *pui8FrameworkCmd;
+	IMG_BYTE *pui8StaticComputeContextState;
+	IMG_INT32 i32Priority;
+	IMG_UINT32 ui32ContextFlags;
+	IMG_UINT32 ui32FrameworkCmdSize;
+	IMG_UINT32 ui32MaxDeadlineMS;
+	IMG_UINT32 ui32PackedCCBSizeU88;
+	IMG_UINT32 ui32StaticComputeContextStateSize;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+/*******************************************
+            RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+/*******************************************
+            RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+/*******************************************
+            RGXSetComputeContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_INT32 i32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/*******************************************
+            RGXNotifyComputeWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	IMG_HANDLE hComputeContext;
+} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/*******************************************
+            RGXKickCDM2
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
+{
+	IMG_UINT64 ui64DeadlineInus;
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 *pui32ClientUpdateOffset;
+	IMG_UINT32 *pui32ClientUpdateValue;
+	IMG_UINT32 *pui32SyncPMRFlags;
+	IMG_BYTE *pui8DMCmd;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
+	IMG_HANDLE *phSyncPMRs;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_UINT32 ui32CmdSize;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32NumOfWorkgroups;
+	IMG_UINT32 ui32NumOfWorkitems;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32SyncPMRCount;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2;
+
+/* Bridge out structure for RGXKickCDM2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_FENCE hUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2;
+
+/*******************************************
+            RGXSetComputeContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Input;
+	IMG_HANDLE hComputeContext;
+	IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetComputeContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Output;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY;
+
+/*******************************************
+            RGXGetLastDeviceError
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG
+{
+	/* Placeholder so the (logically empty) IN struct has non-zero size. */
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR;
+
+/* Bridge out structure for RGXGetLastDeviceError */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG
+{
+	PVRSRV_ERROR eError;
+	IMG_UINT32 ui32Error;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
+
+/*******************************************
+            RGXKickTimestampQuery
+ *******************************************/
+
+/* Bridge in structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY_TAG
+{
+	IMG_HANDLE hComputeContext;
+	IMG_BYTE *pui8DMCmd;
+	PVRSRV_FENCE hCheckFenceFd;
+	IMG_UINT32 ui32CmdSize;
+	IMG_UINT32 ui32ExtJobRef;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY;
+
+/* Bridge out structure for RGXKickTimestampQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY;
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback registered with the handle manager for
+ * RGX_SERVER_COMPUTE_CONTEXT handles: destroys the server-side compute
+ * context when the handle is released. */
+static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) pvData);
+ return eError;
+}
+
+/* Compile-time guards: these sizes are used below as upper bounds for
+ * 32-bit user-supplied array lengths, so they must themselves fit in
+ * IMG_UINT32. */
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+	      "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_STATIC_COMPUTECONTEXT_SIZE <= IMG_UINT32_MAX,
+	      "RGXFWIF_STATIC_COMPUTECONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for RGXCreateComputeContext.
+ *
+ * Validates the user-supplied framework-command and static-context-state
+ * sizes against their firmware-interface maxima, copies both arrays in
+ * from user space (reusing spare space in the bridge input buffer when it
+ * fits, otherwise allocating), resolves the DEV_PRIV_DATA handle, creates
+ * the server compute context via PVRSRVRGXCreateComputeContextKM and
+ * returns a new RGX_SERVER_COMPUTE_CONTEXT handle to the caller.
+ * Always returns 0; the per-call status is reported in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+				    IMG_UINT8 * psRGXCreateComputeContextIN_UI8,
+				    IMG_UINT8 * psRGXCreateComputeContextOUT_UI8,
+				    CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN =
+ (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *)
+ IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *)
+ IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0);
+
+ IMG_BYTE *ui8FrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ IMG_BYTE *ui8StaticComputeContextStateInt = NULL;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total scratch space for all user arrays, summed in 64-bit so that an
+  * overflow is caught by the IMG_UINT32_MAX check before truncation. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+ sizeof(IMG_BYTE)) + 0;
+
+ if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXCreateComputeContext_exit;
+ }
+
+ if (unlikely
+ (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize >
+ RGXFWIF_STATIC_COMPUTECONTEXT_SIZE))
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXCreateComputeContext_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXCreateComputeContext_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateComputeContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+ }
+
+ /* Carve the framework-command array out of the scratch buffer. */
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize != 0)
+ {
+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8FrameworkCmdInt,
+ (const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd,
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
+ PVRSRV_OK)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0)
+ {
+ ui8StaticComputeContextStateInt =
+ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+ sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8StaticComputeContextStateInt,
+ (const void __user *)psRGXCreateComputeContextIN->
+ pui8StaticComputeContextState,
+ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
+ sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hPrivDataInt,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateComputeContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection),
+ psRGXCreateComputeContextIN->i32Priority,
+ psRGXCreateComputeContextIN->ui32FrameworkCmdSize,
+ ui8FrameworkCmdInt,
+ hPrivDataInt,
+ psRGXCreateComputeContextIN->
+ ui32StaticComputeContextStateSize,
+ ui8StaticComputeContextStateInt,
+ psRGXCreateComputeContextIN->ui32PackedCCBSizeU88,
+ psRGXCreateComputeContextIN->ui32ContextFlags,
+ psRGXCreateComputeContextIN->ui64RobustnessAddress,
+ psRGXCreateComputeContextIN->ui32MaxDeadlineMS,
+ &psComputeContextInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+ {
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Wrap the new context in a handle; the release callback destroys it. */
+ psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateComputeContextOUT->
+ hComputeContext,
+ (void *)
+ psComputeContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXCreateComputeContextpsComputeContextIntRelease);
+ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateComputeContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* On failure after the KM create succeeded (e.g. handle allocation
+  * failed), destroy the context directly since no handle owns it. */
+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ if (psComputeContextInt)
+ {
+ PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXCreateComputeContextOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry point for RGXDestroyComputeContext.
+ *
+ * Destroys the caller's compute-context handle (which in turn releases the
+ * server context via the registered release callback). KERNEL_CCB_FULL and
+ * RETRY results are deliberately not logged as errors: the destroy may be
+ * re-attempted by the caller. Always returns 0; status is in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psRGXDestroyComputeContextIN_UI8,
+				     IMG_UINT8 * psRGXDestroyComputeContextOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN =
+ (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *)
+ IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *)
+ IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0);
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyComputeContext_exit;
+ }
+ }
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXDestroyComputeContextOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyComputeContextIN->
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ if (unlikely
+ ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK)
+ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXDestroyComputeContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyComputeContext_exit:
+
+ return 0;
+}
+
+/* Bridge entry point for RGXFlushComputeData.
+ *
+ * Looks up the caller's compute-context handle and forwards to
+ * PVRSRVRGXFlushComputeDataKM. The handle reference taken by the lookup is
+ * released in the common exit path. Always returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+				IMG_UINT8 * psRGXFlushComputeDataIN_UI8,
+				IMG_UINT8 * psRGXFlushComputeDataOUT_UI8,
+				CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN =
+ (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8,
+ 0);
+
+ IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXFlushComputeData_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXFlushComputeDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXFlushComputeData_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXFlushComputeDataOUT->eError = PVRSRVRGXFlushComputeDataKM(psComputeContextInt);
+
+RGXFlushComputeData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge entry point for RGXSetComputeContextPriority.
+ *
+ * Looks up the caller's compute-context handle and forwards the requested
+ * (signed) priority to PVRSRVRGXSetComputeContextPriorityKM. Always returns
+ * 0; per-call status is reported in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+					 IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8,
+					 IMG_UINT8 * psRGXSetComputeContextPriorityOUT_UI8,
+					 CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN =
+ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT =
+ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetComputeContextPriority_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXSetComputeContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+ psComputeContextInt,
+ psRGXSetComputeContextPriorityIN->i32Priority);
+
+RGXSetComputeContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge entry point for RGXNotifyComputeWriteOffsetUpdate.
+ *
+ * Looks up the caller's compute-context handle and forwards to
+ * PVRSRVRGXNotifyComputeWriteOffsetUpdateKM. Always returns 0; per-call
+ * status is reported in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+					      IMG_UINT8 * psRGXNotifyComputeWriteOffsetUpdateIN_UI8,
+					      IMG_UINT8 *
+					      psRGXNotifyComputeWriteOffsetUpdateOUT_UI8,
+					      CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN =
+ (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *)
+ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT
+ =
+ (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *)
+ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt);
+
+RGXNotifyComputeWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Compile-time guards for RGXKickCDM2: these constants bound 32-bit
+ * user-supplied array counts below, so they must fit in IMG_UINT32.
+ * (PVRSRV_MAX_SYNCS is asserted twice by the generator — harmless.) */
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+	      "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+	      "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+	      "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+	      "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for RGXKickCDM2 (compute data-master kick).
+ *
+ * Validates the client-update, sync-PMR and DM-command array sizes, copies
+ * all user arrays (update sync-prim-block handles, offsets, values, update
+ * fence name, DM command, sync-PMR flags and handles) into one scratch
+ * buffer, looks up every handle (context, per-update sync prim blocks,
+ * per-sync PMRs), then calls PVRSRVRGXKickCDMKM. All looked-up references
+ * are dropped in the common exit path. The pointer arrays for looked-up
+ * objects are zeroed up front so cleanup can tell which lookups succeeded.
+ * Always returns 0; per-call status is reported in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry,
+			IMG_UINT8 * psRGXKickCDM2IN_UI8,
+			IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN =
+ (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT =
+ (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *ui8DMCmdInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total scratch space for all user arrays, summed in 64-bit so an
+  * overflow is caught by the IMG_UINT32_MAX check before truncation. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
+ if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickCDM2_exit;
+ }
+
+ if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickCDM2_exit;
+ }
+
+ if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickCDM2_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXKickCDM2_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickCDM2_exit;
+ }
+ }
+ }
+
+ /* Carve arrays out of the scratch buffer; zero the looked-up-object
+  * array so cleanup only releases entries that were resolved. */
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0,
+ psRGXKickCDM2IN->ui32ClientUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hClientUpdateUFOSyncPrimBlockInt2,
+ (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock,
+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientUpdateOffsetInt,
+ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset,
+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientUpdateValueInt,
+ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue,
+ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+
+ /* The fence name is a fixed-size array; always present. */
+ {
+ uiUpdateFenceNameInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiUpdateFenceNameInt,
+ (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName,
+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ /* Force NUL-termination; user space is not trusted to do so. */
+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+ if (psRGXKickCDM2IN->ui32CmdSize != 0)
+ {
+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd,
+ psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32SyncPMRFlagsInt,
+ (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags,
+ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *));
+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs,
+ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM2_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXKickCDM2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickCDM2_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDM2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)
+ &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickCDM2_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDM2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickCDM2_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXKickCDM2OUT->eError =
+ PVRSRVRGXKickCDMKM(psComputeContextInt,
+ psRGXKickCDM2IN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickCDM2IN->hCheckFenceFd,
+ psRGXKickCDM2IN->hUpdateTimeline,
+ &psRGXKickCDM2OUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickCDM2IN->ui32CmdSize,
+ ui8DMCmdInt,
+ psRGXKickCDM2IN->ui32PDumpFlags,
+ psRGXKickCDM2IN->ui32ExtJobRef,
+ psRGXKickCDM2IN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt,
+ psRGXKickCDM2IN->ui32NumOfWorkgroups,
+ psRGXKickCDM2IN->ui32NumOfWorkitems,
+ psRGXKickCDM2IN->ui64DeadlineInus);
+
+RGXKickCDM2_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+
+ if (hClientUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psClientUpdateUFOSyncPrimBlockInt
+ && psClientUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXKickCDM2OUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry point for RGXSetComputeContextProperty.
+ *
+ * Looks up the caller's compute-context handle and forwards the property
+ * id plus a 64-bit input value to PVRSRVRGXSetComputeContextPropertyKM,
+ * returning the 64-bit output value in OUT->ui64Output. Always returns 0;
+ * per-call status is reported in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+					 IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8,
+					 IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8,
+					 CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN =
+ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *)
+ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT =
+ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *)
+ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetComputeContextProperty_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXSetComputeContextPropertyOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXSetComputeContextProperty_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXSetComputeContextPropertyOUT->eError =
+ PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt,
+ psRGXSetComputeContextPropertyIN->ui32Property,
+ psRGXSetComputeContextPropertyIN->ui64Input,
+ &psRGXSetComputeContextPropertyOUT->ui64Output);
+
+RGXSetComputeContextProperty_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Bridge handler for RGXGetLastDeviceError.
+ * Checks the COMPUTE feature is present, then asks
+ * PVRSRVRGXGetLastDeviceErrorKM for the last recorded device error code.
+ * The IN structure is an empty placeholder and is deliberately unused.
+ * Always returns 0; status/result are reported via the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8,
+ IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN =
+ (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *)
+ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *)
+ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0);
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXGetLastDeviceError_exit;
+ }
+ }
+
+ PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN);
+
+ psRGXGetLastDeviceErrorOUT->eError =
+ PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection),
+ &psRGXGetLastDeviceErrorOUT->ui32Error);
+
+RGXGetLastDeviceError_exit:
+
+ return 0;
+}
+
+/* The command-size apphint bound must fit the 32-bit size arithmetic below. */
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+	      "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler for RGXKickTimestampQuery.
+ * Validates the user-supplied DM command size, copies the command bytes in
+ * from user space (reusing the tail of the bridge input buffer when it is
+ * large enough, otherwise a temporary heap allocation), resolves the
+ * compute-context handle and forwards everything to
+ * PVRSRVRGXKickTimestampQueryKM. Always returns 0; status is in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXKickTimestampQuery(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXKickTimestampQueryIN_UI8,
+ IMG_UINT8 * psRGXKickTimestampQueryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryIN =
+ (PVRSRV_BRIDGE_IN_RGXKICKTIMESTAMPQUERY *)
+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *psRGXKickTimestampQueryOUT =
+ (PVRSRV_BRIDGE_OUT_RGXKICKTIMESTAMPQUERY *)
+ IMG_OFFSET_ADDR(psRGXKickTimestampQueryOUT_UI8, 0);
+
+ IMG_HANDLE hComputeContext = psRGXKickTimestampQueryIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
+ IMG_BYTE *ui8DMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Computed in 64 bits so the multiply cannot wrap before the
+  * explicit IMG_UINT32_MAX range check further down. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0;
+
+ /* Reject user-controlled command sizes above the firmware limit. */
+ if (unlikely(psRGXKickTimestampQueryIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTimestampQuery_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXKickTimestampQuery_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXKickTimestampQueryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTimestampQueryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer too small: fall back to a heap allocation,
+  * freed at the bottom of this function. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+ }
+
+ if (psRGXKickTimestampQueryIN->ui32CmdSize != 0)
+ {
+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickTimestampQueryIN->pui8DMCmd,
+ psRGXKickTimestampQueryIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickTimestampQueryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTimestampQuery_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXKickTimestampQueryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXKickTimestampQueryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTimestampQuery_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXKickTimestampQueryOUT->eError =
+ PVRSRVRGXKickTimestampQueryKM(psComputeContextInt,
+ psRGXKickTimestampQueryIN->hCheckFenceFd,
+ psRGXKickTimestampQueryIN->ui32CmdSize,
+ ui8DMCmdInt, psRGXKickTimestampQueryIN->ui32ExtJobRef);
+
+RGXKickTimestampQuery_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXKickTimestampQueryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when we fell back to the heap allocation above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXCMPBridge(void);
+void DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+/*
+ * Register every RGXCMP bridge call in the services dispatch table.
+ * Must be paired with DeinitRGXCMPBridge on teardown.
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT,
+ PVRSRVBridgeRGXCreateComputeContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT,
+ PVRSRVBridgeRGXDestroyComputeContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA,
+ PVRSRVBridgeRGXFlushComputeData, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY,
+ PVRSRVBridgeRGXSetComputeContextPriority, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE,
+ PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2,
+ PVRSRVBridgeRGXKickCDM2, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY,
+ PVRSRVBridgeRGXSetComputeContextProperty, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR,
+ PVRSRVBridgeRGXGetLastDeviceError, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY,
+ PVRSRVBridgeRGXKickTimestampQuery, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxcmp functions with services
+ */
+/*
+ * Remove every RGXCMP bridge call from the services dispatch table;
+ * mirrors the registrations performed by InitRGXCMPBridge.
+ */
+void DeinitRGXCMPBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP,
+ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKTIMESTAMPQUERY);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxfwdbg
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxfwdbg
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXFWDBG_BRIDGE_H
+#define COMMON_RGXFWDBG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+/* Dispatch-table command indices for the rgxfwdbg bridge group.
+ * CMD_LAST must stay equal to FIRST + (number of commands - 1); keep it in
+ * sync when adding a new entry. */
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+10)
+
+/*******************************************
+ RGXFWDebugSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG
+{
+ IMG_UINT32 ui32RGXFWLogType;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG;
+
+/* Bridge out structure for RGXFWDebugSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG;
+
+/*******************************************
+ RGXFWDebugDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXFWDebugDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST;
+
+/*******************************************
+ RGXFWDebugSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+ IMG_UINT32 ui32RGXHCSDeadline;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE;
+
+/* Bridge out structure for RGXFWDebugSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE;
+
+/*******************************************
+ RGXFWDebugSetDriverPriority
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetDriverPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY_TAG
+{
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY;
+
+/* Bridge out structure for RGXFWDebugSetDriverPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY;
+
+/*******************************************
+ RGXFWDebugSetDriverIsolationGroup
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetDriverIsolationGroup */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG
+{
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32IsolationGroup;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP;
+
+/* Bridge out structure for RGXFWDebugSetDriverIsolationGroup */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP;
+
+/*******************************************
+ RGXFWDebugSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32OSNewState;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE;
+
+/*******************************************
+ RGXFWDebugMapGuestHeap
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugMapGuestHeap */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP_TAG
+{
+ /* NOTE(review): doubled "ui64ui64" prefix looks like a bridge-generator
+  * artifact (original parameter already carried its own prefix); the name
+  * is part of the generated ABI, so leave it as-is. */
+ IMG_UINT64 ui64ui64GuestHeapBase;
+ IMG_UINT32 ui32DriverID;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP;
+
+/* Bridge out structure for RGXFWDebugMapGuestHeap */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP;
+
+/*******************************************
+ RGXFWDebugPHRConfigure
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugPHRConfigure */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG
+{
+ /* Doubled "ui32ui32" prefix is generator output; name is ABI — keep. */
+ IMG_UINT32 ui32ui32PHRMode;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE;
+
+/* Bridge out structure for RGXFWDebugPHRConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE;
+
+/*******************************************
+ RGXFWDebugWdgConfigure
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugWdgConfigure */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE_TAG
+{
+ /* Doubled "ui32ui32" prefix is generator output; name is ABI — keep. */
+ IMG_UINT32 ui32ui32WdgPeriodUs;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE;
+
+/* Bridge out structure for RGXFWDebugWdgConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE;
+
+/*******************************************
+ RGXCurrentTime
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+ IMG_UINT64 ui64Time;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+/*******************************************
+ RGXFWDebugInjectFault
+ *******************************************/
+
+/* Bridge in structure for RGXFWDebugInjectFault */
+typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT;
+
+/* Bridge out structure for RGXFWDebugInjectFault */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT;
+
+#endif /* COMMON_RGXFWDBG_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxfwdbg
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxfwdbg
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "rgxfwdbg.h"
+#include "pmr.h"
+#include "rgxtimecorr.h"
+
+#include "common_rgxfwdbg_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge entry: forwards the user-supplied FW log type to
+ * PVRSRVRGXFWDebugSetFWLogKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8,
+ IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8,
+ 0);
+
+ psRGXFWDebugSetFWLogOUT->eError =
+ PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetFWLogIN->ui32RGXFWLogType);
+
+ return 0;
+}
+
+/* Bridge entry: triggers a freelist page-list dump via
+ * PVRSRVRGXFWDebugDumpFreelistPageListKM. The IN structure is an empty
+ * placeholder and is deliberately unused. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListIN_UI8,
+ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *)
+ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *)
+ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN);
+
+ psRGXFWDebugDumpFreelistPageListOUT->eError =
+ PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+/* Bridge entry: forwards the hard-context-switch deadline to
+ * PVRSRVRGXFWDebugSetHCSDeadlineKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8,
+ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0);
+
+ psRGXFWDebugSetHCSDeadlineOUT->eError =
+ PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetHCSDeadlineIN->ui32RGXHCSDeadline);
+
+ return 0;
+}
+
+/* Bridge entry: forwards (DriverID, Priority) to
+ * PVRSRVRGXFWDebugSetDriverPriorityKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetDriverPriority(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityIN_UI8,
+ IMG_UINT8 * psRGXFWDebugSetDriverPriorityOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *psRGXFWDebugSetDriverPriorityOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverPriorityOUT_UI8, 0);
+
+ psRGXFWDebugSetDriverPriorityOUT->eError =
+ PVRSRVRGXFWDebugSetDriverPriorityKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetDriverPriorityIN->ui32DriverID,
+ psRGXFWDebugSetDriverPriorityIN->ui32Priority);
+
+ return 0;
+}
+
+/* Bridge entry: forwards (DriverID, IsolationGroup) to
+ * PVRSRVRGXFWDebugSetDriverIsolationGroupKM. Returns 0; status in
+ * OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetDriverIsolationGroupIN_UI8,
+ IMG_UINT8 *
+ psRGXFWDebugSetDriverIsolationGroupOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETDRIVERISOLATIONGROUP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *psRGXFWDebugSetDriverIsolationGroupOUT
+ =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETDRIVERISOLATIONGROUP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetDriverIsolationGroupOUT_UI8, 0);
+
+ psRGXFWDebugSetDriverIsolationGroupOUT->eError =
+ PVRSRVRGXFWDebugSetDriverIsolationGroupKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetDriverIsolationGroupIN->
+ ui32DriverID,
+ psRGXFWDebugSetDriverIsolationGroupIN->
+ ui32IsolationGroup);
+
+ return 0;
+}
+
+/* Bridge entry: forwards (DriverID, OSNewState) to
+ * PVRSRVRGXFWDebugSetOSNewOnlineStateKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateIN_UI8,
+ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0);
+
+ psRGXFWDebugSetOSNewOnlineStateOUT->eError =
+ PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugSetOSNewOnlineStateIN->ui32DriverID,
+ psRGXFWDebugSetOSNewOnlineStateIN->
+ ui32OSNewState);
+
+ return 0;
+}
+
+/* Bridge entry: forwards (DriverID, GuestHeapBase) to
+ * PVRSRVRGXFWDebugMapGuestHeapKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugMapGuestHeap(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugMapGuestHeapIN_UI8,
+ IMG_UINT8 * psRGXFWDebugMapGuestHeapOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGMAPGUESTHEAP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *psRGXFWDebugMapGuestHeapOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGMAPGUESTHEAP *)
+ IMG_OFFSET_ADDR(psRGXFWDebugMapGuestHeapOUT_UI8, 0);
+
+ psRGXFWDebugMapGuestHeapOUT->eError =
+ PVRSRVRGXFWDebugMapGuestHeapKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugMapGuestHeapIN->ui32DriverID,
+ psRGXFWDebugMapGuestHeapIN->ui64ui64GuestHeapBase);
+
+ return 0;
+}
+
+/* Bridge entry: forwards the PHR mode value to
+ * PVRSRVRGXFWDebugPHRConfigureKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8,
+ IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0);
+
+ psRGXFWDebugPHRConfigureOUT->eError =
+ PVRSRVRGXFWDebugPHRConfigureKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugPHRConfigureIN->ui32ui32PHRMode);
+
+ return 0;
+}
+
+/* Bridge entry: forwards the watchdog period (microseconds) to
+ * PVRSRVRGXFWDebugWdgConfigureKM. Returns 0; status in OUT->eError. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugWdgConfigure(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugWdgConfigureIN_UI8,
+ IMG_UINT8 * psRGXFWDebugWdgConfigureOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *)
+ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureOUT_UI8, 0);
+
+ psRGXFWDebugWdgConfigureOUT->eError =
+ PVRSRVRGXFWDebugWdgConfigureKM(psConnection, OSGetDevNode(psConnection),
+ psRGXFWDebugWdgConfigureIN->ui32ui32WdgPeriodUs);
+
+ return 0;
+}
+
+/* Bridge entry: returns the current device time from PVRSRVRGXCurrentTime
+ * in OUT->ui64Time. The IN structure is an empty placeholder. */
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCurrentTimeIN_UI8,
+ IMG_UINT8 * psRGXCurrentTimeOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN =
+ (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+ psRGXCurrentTimeOUT->eError =
+ PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection),
+ &psRGXCurrentTimeOUT->ui64Time);
+
+ return 0;
+}
+
+/* Fault injection is a validation-only facility: when SUPPORT_VALIDATION is
+ * not defined the dispatch-table entry is registered as NULL instead. */
+#if defined(SUPPORT_VALIDATION)
+
+/* Bridge entry: asks PVRSRVRGXFWDebugInjectFaultKM to inject a FW fault.
+ * The IN structure is an empty placeholder and is deliberately unused. */
+static IMG_INT
+PVRSRVBridgeRGXFWDebugInjectFault(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXFWDebugInjectFaultIN_UI8,
+ IMG_UINT8 * psRGXFWDebugInjectFaultOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultIN =
+ (PVRSRV_BRIDGE_IN_RGXFWDEBUGINJECTFAULT *)
+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *psRGXFWDebugInjectFaultOUT =
+ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGINJECTFAULT *)
+ IMG_OFFSET_ADDR(psRGXFWDebugInjectFaultOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugInjectFaultIN);
+
+ psRGXFWDebugInjectFaultOUT->eError =
+ PVRSRVRGXFWDebugInjectFaultKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeRGXFWDebugInjectFault NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXFWDBGBridge(void);
+void DeinitRGXFWDBGBridge(void);
+
+/*
+ * Register all RGXFWDBG functions with services
+ */
+/*
+ * Register every RGXFWDBG bridge call in the services dispatch table.
+ * Note the InjectFault entry may be NULL when SUPPORT_VALIDATION is off
+ * (see the #else branch above). Paired with DeinitRGXFWDBGBridge.
+ */
+PVRSRV_ERROR InitRGXFWDBGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG,
+ PVRSRVBridgeRGXFWDebugSetFWLog, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST,
+ PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE,
+ PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY,
+ PVRSRVBridgeRGXFWDebugSetDriverPriority, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP,
+ PVRSRVBridgeRGXFWDebugSetDriverIsolationGroup, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE,
+ PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP,
+ PVRSRVBridgeRGXFWDebugMapGuestHeap, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE,
+ PVRSRVBridgeRGXFWDebugPHRConfigure, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE,
+ PVRSRVBridgeRGXFWDebugWdgConfigure, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME,
+ PVRSRVBridgeRGXCurrentTime, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT,
+ PVRSRVBridgeRGXFWDebugInjectFault, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxfwdbg functions with services
+ */
+/* Remove every RGXFWDBG bridge handler from the dispatch table.
+ * Mirrors InitRGXFWDBGBridge() entry for entry. */
+void DeinitRGXFWDBGBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETDRIVERISOLATIONGROUP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGMAPGUESTHEAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG,
+ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGINJECTFAULT);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf.h"
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5)
+
+/*******************************************
+ RGXCtrlHWPerf
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+ IMG_UINT64 ui64Mask; /* event mask forwarded to PVRSRVRGXCtrlHWPerfKM */
+ IMG_UINT32 ui32StreamId; /* HWPerf stream to control */
+ IMG_BOOL bToggle; /* presumably toggles the masked events — confirm in KM impl */
+} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+ PVRSRV_ERROR eError; /* result of the server-side call */
+} __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+/*******************************************
+ RGXConfigureHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXConfigureHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; /* user pointer; copied in by the server stub */
+ IMG_UINT32 ui32ArrayLen; /* number of entries at psBlockConfigs */
+ IMG_UINT32 ui32CtrlWord;
+} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS;
+
+/* Bridge out structure for RGXConfigureHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG
+{
+ PVRSRV_ERROR eError; /* result of the server-side call */
+} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS;
+
+/*******************************************
+ RGXGetHWPerfBvncFeatureFlags
+ *******************************************/
+
+/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder; /* no input payload; keeps the struct non-empty */
+} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+ RGX_HWPERF_BVNC sBVNC; /* filled in by PVRSRVRGXGetHWPerfBvncFeatureFlagsKM */
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/*******************************************
+ RGXControlHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXControlHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG
+{
+ IMG_UINT16 *pui16BlockIDs; /* user pointer; copied in by the server stub */
+ IMG_UINT32 ui32ArrayLen; /* number of entries at pui16BlockIDs */
+ IMG_BOOL bEnable; /* enable/disable the listed blocks */
+} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS;
+
+/* Bridge out structure for RGXControlHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG
+{
+ PVRSRV_ERROR eError; /* result of the server-side call */
+} __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS;
+
+/*******************************************
+ RGXGetConfiguredHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXGetConfiguredHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; /* user buffer receiving one entry */
+ IMG_UINT32 ui32BlockID; /* block to query */
+} __packed PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXGetConfiguredHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS_TAG
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters; /* echoes the user pointer from IN */
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS;
+
+/*******************************************
+ RGXGetEnabledHWPerfBlocks
+ *******************************************/
+
+/* Bridge in structure for RGXGetEnabledHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS_TAG
+{
+ IMG_UINT32 *pui32EnabledBlockIDs; /* user buffer receiving up to ui32ArrayLen IDs */
+ IMG_UINT32 ui32ArrayLen; /* capacity of pui32EnabledBlockIDs */
+} __packed PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS;
+
+/* Bridge out structure for RGXGetEnabledHWPerfBlocks */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS_TAG
+{
+ IMG_UINT32 *pui32EnabledBlockIDs; /* echoes the user pointer from IN */
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32BlockCount; /* number of blocks reported by the KM call */
+} __packed PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS;
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+#include "rgx_fwif_km.h"
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge entry: control HWPerf event collection.
+ * All parameters travel by value in the IN structure; no user-memory copies
+ * are needed, so the call is forwarded directly to the KM implementation. */
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCtrlHWPerfIN_UI8,
+ IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN =
+ (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0);
+
+ psRGXCtrlHWPerfOUT->eError =
+ PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection),
+ psRGXCtrlHWPerfIN->ui32StreamId,
+ psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask);
+
+ return 0;
+}
+
+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3 <= IMG_UINT32_MAX,
+ "RGXFWIF_HWPERF_CTRL_BLKS_MAX+3 must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry: configure HWPerf counter blocks.
+ * Copies a caller-supplied array of RGX_HWPERF_CONFIG_CNTBLK from user space
+ * (length-validated against RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3) and forwards it
+ * to PVRSRVRGXConfigureHWPerfBlocksKM. The scratch buffer is carved out of the
+ * tail of the bridge input buffer when it fits, otherwise heap-allocated. */
+static IMG_INT
+PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8,
+ IMG_UINT8 * psRGXConfigureHWPerfBlocksOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN =
+ (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0);
+
+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size first, so an oversized ui32ArrayLen cannot overflow. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen *
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0;
+
+ /* Reject untrusted array lengths above the firmware limit (+3 slack). */
+ if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX + 3))
+ {
+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXConfigureHWPerfBlocks_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXConfigureHWPerfBlocks_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation; freed at exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXConfigureHWPerfBlocks_exit;
+ }
+ }
+ }
+
+ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0)
+ {
+ psBlockConfigsInt =
+ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+ }
+
+ /* Copy the data over */
+ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, psBlockConfigsInt,
+ (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs,
+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen *
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK)
+ {
+ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXConfigureHWPerfBlocks_exit;
+ }
+ }
+
+ psRGXConfigureHWPerfBlocksOUT->eError =
+ PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+ psRGXConfigureHWPerfBlocksIN->ui32CtrlWord,
+ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen,
+ psBlockConfigsInt);
+
+RGXConfigureHWPerfBlocks_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the buffer was heap-allocated, not in-buffer carved. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry: query the HWPerf BVNC feature flags for this device.
+ * No input payload; sBVNC in the OUT structure is filled by the KM call. */
+static IMG_INT
+PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8,
+ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN =
+ (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *)
+ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *)
+ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0);
+
+ /* IN structure is an empty placeholder for this command. */
+ PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN);
+
+ psRGXGetHWPerfBvncFeatureFlagsOUT->eError =
+ PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevNode(psConnection),
+ &psRGXGetHWPerfBvncFeatureFlagsOUT->sBVNC);
+
+ return 0;
+}
+
+static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX,
+ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry: enable/disable a list of HWPerf counter blocks.
+ * Copies a caller-supplied IMG_UINT16 block-ID array from user space
+ * (length-validated against RGXFWIF_HWPERF_CTRL_BLKS_MAX) and forwards it to
+ * PVRSRVRGXControlHWPerfBlocksKM. Scratch space comes from the tail of the
+ * bridge input buffer when it fits, otherwise from the heap. */
+static IMG_INT
+PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8,
+ IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN =
+ (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0);
+
+ IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size first, so an oversized ui32ArrayLen cannot overflow. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0;
+
+ /* Reject untrusted array lengths above the firmware limit. */
+ if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX))
+ {
+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXControlHWPerfBlocks_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXControlHWPerfBlocks_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation; freed at exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXControlHWPerfBlocks_exit;
+ }
+ }
+ }
+
+ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0)
+ {
+ ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16);
+ }
+
+ /* Copy the data over */
+ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui16BlockIDsInt,
+ (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs,
+ psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK)
+ {
+ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXControlHWPerfBlocks_exit;
+ }
+ }
+
+ psRGXControlHWPerfBlocksOUT->eError =
+ PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+ psRGXControlHWPerfBlocksIN->bEnable,
+ psRGXControlHWPerfBlocksIN->ui32ArrayLen,
+ ui16BlockIDsInt);
+
+RGXControlHWPerfBlocks_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the buffer was heap-allocated, not in-buffer carved. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Generated guard; trivially true for the fixed array length of 1. */
+static_assert(1 <= IMG_UINT32_MAX, "1 must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry: fetch the configured counter block for one block ID.
+ * Allocates kernel scratch for exactly one RGX_HWPERF_CONFIG_CNTBLK, has the
+ * KM call fill it, then copies it out to the caller-supplied user pointer. */
+static IMG_INT
+PVRSRVBridgeRGXGetConfiguredHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersIN_UI8,
+ IMG_UINT8 * psRGXGetConfiguredHWPerfCountersOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersIN =
+ (PVRSRV_BRIDGE_IN_RGXGETCONFIGUREDHWPERFCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *psRGXGetConfiguredHWPerfCountersOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETCONFIGUREDHWPERFCOUNTERS *)
+ IMG_OFFSET_ADDR(psRGXGetConfiguredHWPerfCountersOUT_UI8, 0);
+
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCountersInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0;
+
+ /* Echo the destination user pointer so the copy-out below can use OUT. */
+ psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters =
+ psRGXGetConfiguredHWPerfCountersIN->psConfiguredCounters;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXGetConfiguredHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psRGXGetConfiguredHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation; freed at exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+ {
+ psConfiguredCountersInt =
+ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += 1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+ }
+
+ psRGXGetConfiguredHWPerfCountersOUT->eError =
+ PVRSRVRGXGetConfiguredHWPerfCountersKM(psConnection, OSGetDevNode(psConnection),
+ psRGXGetConfiguredHWPerfCountersIN->ui32BlockID,
+ psConfiguredCountersInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXGetConfiguredHWPerfCountersOUT->eError != PVRSRV_OK))
+ {
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((psConfiguredCountersInt) && ((1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL,
+ (void __user *)psRGXGetConfiguredHWPerfCountersOUT->psConfiguredCounters,
+ psConfiguredCountersInt,
+ (1 * sizeof(RGX_HWPERF_CONFIG_CNTBLK))) != PVRSRV_OK))
+ {
+ psRGXGetConfiguredHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXGetConfiguredHWPerfCounters_exit;
+ }
+ }
+
+RGXGetConfiguredHWPerfCounters_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXGetConfiguredHWPerfCountersOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the buffer was heap-allocated, not in-buffer carved. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry: list currently enabled HWPerf counter blocks.
+ * Allocates kernel scratch for ui32ArrayLen block IDs, has the KM call fill
+ * it (and report the total in ui32BlockCount), then copies the IDs out to the
+ * caller-supplied user buffer. Note: no explicit upper bound is imposed on
+ * ui32ArrayLen here beyond the IMG_UINT32_MAX byte-size check — presumably
+ * PVRSRVRGXGetEnabledHWPerfBlocksKM validates the length; confirm in KM impl. */
+static IMG_INT
+PVRSRVBridgeRGXGetEnabledHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksIN_UI8,
+ IMG_UINT8 * psRGXGetEnabledHWPerfBlocksOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksIN =
+ (PVRSRV_BRIDGE_IN_RGXGETENABLEDHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *psRGXGetEnabledHWPerfBlocksOUT =
+ (PVRSRV_BRIDGE_OUT_RGXGETENABLEDHWPERFBLOCKS *)
+ IMG_OFFSET_ADDR(psRGXGetEnabledHWPerfBlocksOUT_UI8, 0);
+
+ IMG_UINT32 *pui32EnabledBlockIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit size first, so an oversized ui32ArrayLen cannot overflow. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) + 0;
+
+ /* Echo the destination user pointer so the copy-out below can use OUT. */
+ psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs =
+ psRGXGetEnabledHWPerfBlocksIN->pui32EnabledBlockIDs;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXGetEnabledHWPerfBlocksIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXGetEnabledHWPerfBlocksIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Fall back to a heap allocation; freed at exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+ }
+ }
+
+ if (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen != 0)
+ {
+ pui32EnabledBlockIDsInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32);
+ }
+
+ psRGXGetEnabledHWPerfBlocksOUT->eError =
+ PVRSRVRGXGetEnabledHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection),
+ psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen,
+ &psRGXGetEnabledHWPerfBlocksOUT->ui32BlockCount,
+ pui32EnabledBlockIDsInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXGetEnabledHWPerfBlocksOUT->eError != PVRSRV_OK))
+ {
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((pui32EnabledBlockIDsInt) &&
+ ((psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psRGXGetEnabledHWPerfBlocksOUT->pui32EnabledBlockIDs,
+ pui32EnabledBlockIDsInt,
+ (psRGXGetEnabledHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT32))) !=
+ PVRSRV_OK))
+ {
+ psRGXGetEnabledHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXGetEnabledHWPerfBlocks_exit;
+ }
+ }
+
+RGXGetEnabledHWPerfBlocks_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXGetEnabledHWPerfBlocksOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the buffer was heap-allocated, not in-buffer carved. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+void DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+/* Register every RGXHWPERF bridge handler in the global dispatch table.
+ * The final NULL argument means no bridge-specific lock for these entries. */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF,
+ PVRSRVBridgeRGXCtrlHWPerf, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS,
+ PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS,
+ PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS,
+ PVRSRVBridgeRGXControlHWPerfBlocks, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS,
+ PVRSRVBridgeRGXGetConfiguredHWPerfCounters, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS,
+ PVRSRVBridgeRGXGetEnabledHWPerfBlocks, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxhwperf functions with services
+ */
+/* Remove every RGXHWPERF bridge handler from the dispatch table.
+ * Mirrors InitRGXHWPERFBridge() entry for entry. */
+void DeinitRGXHWPERFBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETCONFIGUREDHWPERFCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF,
+ PVRSRV_BRIDGE_RGXHWPERF_RGXGETENABLEDHWPERFBLOCKS);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+/* Command indices for the rgxkicksync bridge: each entry is an offset from
+ * CMD_FIRST; CMD_LAST bounds the dispatch table for this module.
+ */
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3)
+
+/*******************************************
+ RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext.
+ * NOTE: these structures are __packed because they form the user/kernel
+ * bridge ABI — field order and sizes must not change.
+ */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	/* Device private-data handle (PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA) */
+	IMG_HANDLE hPrivData;
+	/* Context creation flags passed through to the server */
+	IMG_UINT32 ui32ContextFlags;
+	/* Client CCB size hint, two log2 values packed into U8.8 */
+	IMG_UINT32 ui32PackedCCBSizeU88;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+	/* Handle to the newly created kick-sync context */
+	IMG_HANDLE hKickSyncContext;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+/*******************************************
+ RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+/*******************************************
+ RGXKickSync2
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync2.
+ * The pointer members are USER-SPACE addresses of arrays of length
+ * ui32ClientUpdateCount (puiUpdateFenceName is PVRSRV_SYNC_NAME_LENGTH chars);
+ * the server side copies them in with OSCopyFromUser.
+ */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG
+{
+	IMG_HANDLE hKickSyncContext;
+	IMG_UINT32 *pui32UpdateDevVarOffset;
+	IMG_UINT32 *pui32UpdateValue;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_HANDLE *phUpdateUFODevVarBlock;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE hTimelineFenceFD;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_UINT32 ui32ExtJobRef;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2;
+
+/* Bridge out structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG
+{
+	PVRSRV_ERROR eError;
+	/* Fence FD signalled when the kick's updates complete */
+	PVRSRV_FENCE hUpdateFenceFD;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2;
+
+/*******************************************
+ RGXSetKickSyncContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Input;
+	IMG_HANDLE hKickSyncContext;
+	/* Property selector (RGX_CONTEXT_PROPERTY_* value) */
+	IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+	IMG_UINT64 ui64Output;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback registered for kick-sync context handles: tears
+ * down the server-side context when the last handle reference goes away.
+ */
+static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void *pvData)
+{
+	return PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) pvData);
+}
+
+/* Server entry point for the RGXCreateKickSyncContext bridge call.
+ * Looks up the caller's device private-data handle, creates a kick-sync
+ * context, and publishes it back to the connection as a new handle whose
+ * release callback destroys the context. Always returns 0; the real status
+ * is reported through psRGXCreateKickSyncContextOUT->eError.
+ * Locking: the handle base lock is never held across the KM call.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psRGXCreateKickSyncContextIN_UI8,
+				     IMG_UINT8 * psRGXCreateKickSyncContextOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN =
+	    (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *)
+	    IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *)
+	    IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0);
+
+	IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateKickSyncContext_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* KM call is made with the handle lock dropped: it may allocate/sleep. */
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevNode(psConnection),
+					     hPrivDataInt,
+					     psRGXCreateKickSyncContextIN->ui32PackedCCBSizeU88,
+					     psRGXCreateKickSyncContextIN->ui32ContextFlags,
+					     &psKickSyncContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Hand ownership of the context to the handle base: from now on the
+	 * release callback is responsible for destroying it.
+	 */
+	psRGXCreateKickSyncContextOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+				      &psRGXCreateKickSyncContextOUT->hKickSyncContext,
+				      (void *)psKickSyncContextInt,
+				      PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      _RGXCreateKickSyncContextpsKickSyncContextIntRelease);
+	if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateKickSyncContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateKickSyncContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* On any failure after the context was created (e.g. handle alloc
+	 * failed) destroy it here, since no release callback owns it yet.
+	 */
+	if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+	{
+		if (psKickSyncContextInt)
+		{
+			PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+		}
+	}
+
+	return 0;
+}
+
+/* Server entry point for the RGXDestroyKickSyncContext bridge call.
+ * Destroys the caller's context handle; the actual context teardown happens
+ * via the handle's release callback. KERNEL_CCB_FULL and RETRY are expected
+ * transient results (firmware queue busy) and are returned to the caller
+ * without logging so user space can retry.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+				      IMG_UINT8 * psRGXDestroyKickSyncContextIN_UI8,
+				      IMG_UINT8 * psRGXDestroyKickSyncContextOUT_UI8,
+				      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN =
+	    (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *)
+	    IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *)
+	    IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyKickSyncContextOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psRGXDestroyKickSyncContextIN->
+					      hKickSyncContext,
+					      PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	if (unlikely
+	    ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK)
+	     && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+	     && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyKickSyncContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyKickSyncContext_exit:
+
+	return 0;
+}
+
+static_assert(PVRSRV_MAX_DEV_VARS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_DEV_VARS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/* Server entry point for the RGXKickSync2 bridge call.
+ * Marshals the caller's update arrays (dev-var block handles, offsets,
+ * values and a fence name) from user space into one kernel buffer, resolves
+ * every handle, and forwards the kick to PVRSRVRGXKickSyncKM.
+ *
+ * Buffer strategy: all array arguments are packed back-to-back into
+ * pArrayArgsBuffer at offsets tracked in ui32NextOffset. The total size is
+ * computed in 64-bit (ui64BufferSize) so that an attacker-controlled
+ * ui32ClientUpdateCount cannot overflow the 32-bit size; the count itself is
+ * also capped at PVRSRV_MAX_DEV_VARS. Where possible the spare tail of the
+ * bridge input buffer is reused instead of allocating.
+ *
+ * Always returns 0; the real status travels in psRGXKickSync2OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry,
+			 IMG_UINT8 * psRGXKickSync2IN_UI8,
+			 IMG_UINT8 * psRGXKickSync2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN =
+	    (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT =
+	    (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0);
+
+	IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+	SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL;
+	IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL;
+	IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL;
+	IMG_UINT32 *ui32UpdateValueInt = NULL;
+	IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	IMG_UINT32 ui32BufferSize = 0;
+	/* Total marshalling-buffer size, summed in 64-bit to avoid overflow. */
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount *
+	     sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+	    ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+	    ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+	    ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0;
+
+	if (unlikely(psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS))
+	{
+		psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXKickSync2_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto RGXKickSync2_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickSync2IN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			/* Spare input-buffer space too small: fall back to a heap buffer. */
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXKickSync2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXKickSync2_exit;
+			}
+		}
+	}
+
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		psUpdateUFODevVarBlockInt =
+		    (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		/* Zeroed so the cleanup path can tell which entries were looked up. */
+		OSCachedMemSet(psUpdateUFODevVarBlockInt, 0,
+			       psRGXKickSync2IN->ui32ClientUpdateCount *
+			       sizeof(SYNC_PRIMITIVE_BLOCK *));
+		ui32NextOffset +=
+		    psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+		hUpdateUFODevVarBlockInt2 =
+		    (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, hUpdateUFODevVarBlockInt2,
+		     (const void __user *)psRGXKickSync2IN->phUpdateUFODevVarBlock,
+		     psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateDevVarOffsetInt =
+		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateDevVarOffsetInt,
+		     (const void __user *)psRGXKickSync2IN->pui32UpdateDevVarOffset,
+		     psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+	if (psRGXKickSync2IN->ui32ClientUpdateCount != 0)
+	{
+		ui32UpdateValueInt =
+		    (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+	}
+
+	/* Copy the data over */
+	if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui32UpdateValueInt,
+		     (const void __user *)psRGXKickSync2IN->pui32UpdateValue,
+		     psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+	}
+
+	{
+		uiUpdateFenceNameInt =
+		    (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiUpdateFenceNameInt,
+		     (const void __user *)psRGXKickSync2IN->puiUpdateFenceName,
+		     PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXKickSync2_exit;
+		}
+		/* Force termination: user space may pass an unterminated name. */
+		((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+						    1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXKickSync2OUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psKickSyncContextInt,
+				       hKickSyncContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE);
+	if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXKickSync2_exit;
+	}
+
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+		{
+			/* Look up the address from the handle */
+			psRGXKickSync2OUT->eError =
+			    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+						       (void **)&psUpdateUFODevVarBlockInt[i],
+						       hUpdateUFODevVarBlockInt2[i],
+						       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+						       IMG_TRUE);
+			if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK))
+			{
+				UnlockHandle(psConnection->psHandleBase);
+				goto RGXKickSync2_exit;
+			}
+		}
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXKickSync2OUT->eError =
+	    PVRSRVRGXKickSyncKM(psKickSyncContextInt,
+				psRGXKickSync2IN->ui32ClientUpdateCount,
+				psUpdateUFODevVarBlockInt,
+				ui32UpdateDevVarOffsetInt,
+				ui32UpdateValueInt,
+				psRGXKickSync2IN->hCheckFenceFD,
+				psRGXKickSync2IN->hTimelineFenceFD,
+				&psRGXKickSync2OUT->hUpdateFenceFD,
+				uiUpdateFenceNameInt, psRGXKickSync2IN->ui32ExtJobRef);
+
+RGXKickSync2_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psKickSyncContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hKickSyncContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	}
+
+	if (hUpdateUFODevVarBlockInt2)
+	{
+		IMG_UINT32 i;
+
+		for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++)
+		{
+
+			/* Unreference the previously looked up handle */
+			if (psUpdateUFODevVarBlockInt && psUpdateUFODevVarBlockInt[i])
+			{
+				PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+							    hUpdateUFODevVarBlockInt2[i],
+							    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+			}
+		}
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psRGXKickSync2OUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free if we fell back to the heap; the in-buffer tail is not ours. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Server entry point for the RGXSetKickSyncContextProperty bridge call.
+ * Resolves the context handle, forwards the property set/get to the KM
+ * implementation (ui64Input in, ui64Output back to the caller), then drops
+ * the handle reference. Always returns 0; status is in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+					  IMG_UINT8 * psRGXSetKickSyncContextPropertyIN_UI8,
+					  IMG_UINT8 * psRGXSetKickSyncContextPropertyOUT_UI8,
+					  CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyIN =
+	    (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *)
+	    IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *)
+	    IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0);
+
+	IMG_HANDLE hKickSyncContext = psRGXSetKickSyncContextPropertyIN->hKickSyncContext;
+	RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXSetKickSyncContextPropertyOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psKickSyncContextInt,
+				       hKickSyncContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE);
+	if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetKickSyncContextProperty_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetKickSyncContextPropertyOUT->eError =
+	    PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt,
+						  psRGXSetKickSyncContextPropertyIN->ui32Property,
+						  psRGXSetKickSyncContextPropertyIN->ui64Input,
+						  &psRGXSetKickSyncContextPropertyOUT->ui64Output);
+
+RGXSetKickSyncContextProperty_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psKickSyncContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hKickSyncContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
+
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+void DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+/*
+ * Register all RGXKICKSYNC functions with services
+ *
+ * Installs each rgxkicksync command handler into the bridge dispatch table
+ * under (module, command) so incoming ioctls are routed to it. Mirrors
+ * DeinitRGXKICKSYNCBridge, which must remove the same four entries.
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT,
+			      PVRSRVBridgeRGXCreateKickSyncContext, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT,
+			      PVRSRVBridgeRGXDestroyKickSyncContext, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2,
+			      PVRSRVBridgeRGXKickSync2, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+			      PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY,
+			      PVRSRVBridgeRGXSetKickSyncContextProperty, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxkicksync functions with services
+ */
+/*
+ * Unregister all rgxkicksync functions with services
+ *
+ * Removes the four dispatch-table entries installed by InitRGXKICKSYNCBridge.
+ */
+void DeinitRGXKICKSYNCBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC,
+				PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY);
+
+}
+#else /* SUPPORT_RGXKICKSYNC_BRIDGE */
+/* This bridge is conditional on SUPPORT_RGXKICKSYNC_BRIDGE - when not defined,
+ * do not populate the dispatch table with its functions
+ */
+#define InitRGXKICKSYNCBridge() \
+ PVRSRV_OK
+
+#define DeinitRGXKICKSYNCBridge()
+
+#endif /* SUPPORT_RGXKICKSYNC_BRIDGE */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RGXPDUMP_BRIDGE_H
+#define CLIENT_RGXPDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxpdump_bridge.h"
+
+/* Client-side prototypes for the rgxpdump bridge. In the direct bridge,
+ * hBridge is the device node cast to an opaque handle; each call forwards
+ * straight to the corresponding PVRSRVPDump*KM server function.
+ */
+
+/* Dump the firmware trace buffer to the PDump capture stream. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpTraceBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags);
+
+/* Dump the signature buffers to the PDump capture stream. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags);
+
+/* Emit a compute CRC signature check (only available in validation builds). */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32PDumpFlags);
+
+/* Emit a CRC signature check command. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge,
+						       IMG_UINT32 ui32PDumpFlags);
+
+/* Emit a validation pre-command check. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPreCommand(IMG_HANDLE hBridge,
+							IMG_UINT32 ui32PDumpFlags);
+
+/* Emit a validation post-command check. */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPostCommand(IMG_HANDLE hBridge,
+							 IMG_UINT32 ui32PDumpFlags);
+
+#endif /* CLIENT_RGXPDUMP_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for rgxpdump
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+/* Direct-bridge wrapper: dump the firmware trace buffer. hBridge carries
+ * the device node in the server-context (direct) bridge.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpTraceBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+	return PVRSRVPDumpTraceBufferKM(NULL, psDevNode, ui32PDumpFlags);
+}
+
+/* Direct-bridge wrapper: dump the signature buffers. hBridge carries the
+ * device node in the server-context (direct) bridge.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpSignatureBuffer(IMG_HANDLE hBridge, IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+	return PVRSRVPDumpSignatureBufferKM(NULL, psDevNode, ui32PDumpFlags);
+}
+
+/* Direct-bridge wrapper: emit a compute CRC signature check. Only available
+ * in SUPPORT_VALIDATION builds; otherwise returns NOT_IMPLEMENTED.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpComputeCRCSignatureCheck(IMG_HANDLE hBridge,
+							      IMG_UINT32 ui32PDumpFlags)
+{
+#if defined(SUPPORT_VALIDATION)
+	PVRSRV_ERROR eError;
+
+	eError =
+	    PVRSRVPDumpComputeCRCSignatureCheckKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+						  ui32PDumpFlags);
+
+	return eError;
+#else
+	/* Mark BOTH parameters unreferenced: the original only marked
+	 * ui32PDumpFlags, leaving hBridge to trigger -Wunused-parameter
+	 * (fatal in -Werror kernel builds) when SUPPORT_VALIDATION is off.
+	 */
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Direct-bridge wrapper: emit a CRC signature check command. hBridge carries
+ * the device node in the server-context (direct) bridge.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpCRCSignatureCheck(IMG_HANDLE hBridge,
+						       IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+	return PVRSRVPDumpCRCSignatureCheckKM(NULL, psDevNode, ui32PDumpFlags);
+}
+
+/* Direct-bridge wrapper: emit a validation pre-command check. hBridge
+ * carries the device node in the server-context (direct) bridge.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPreCommand(IMG_HANDLE hBridge,
+							IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+	return PVRSRVPDumpValCheckPreCommandKM(NULL, psDevNode, ui32PDumpFlags);
+}
+
+/* Direct-bridge wrapper: emit a validation post-command check. hBridge
+ * carries the device node in the server-context (direct) bridge.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgePDumpValCheckPostCommand(IMG_HANDLE hBridge,
+							 IMG_UINT32 ui32PDumpFlags)
+{
+	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+	return PVRSRVPDumpValCheckPostCommandKM(NULL, psDevNode, ui32PDumpFlags);
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+/* Per-module command indices for the RGXPDUMP bridge. Each command ID is an
+ * offset from CMD_FIRST; CMD_LAST must track the highest offset below so the
+ * dispatch table is sized correctly.
+ */
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+5)
+
+/* All bridge in/out structures below are __packed so their layout is fixed
+ * and identical on both sides of the user/kernel bridge. Every RGXPDUMP
+ * command takes a single ui32PDumpFlags input and returns only eError.
+ */
+
+/*******************************************
+            PDumpTraceBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+/*******************************************
+            PDumpSignatureBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+/*******************************************
+            PDumpComputeCRCSignatureCheck
+ *******************************************/
+
+/* Bridge in structure for PDumpComputeCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK;
+
+/* Bridge out structure for PDumpComputeCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK;
+
+/*******************************************
+            PDumpCRCSignatureCheck
+ *******************************************/
+
+/* Bridge in structure for PDumpCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK;
+
+/* Bridge out structure for PDumpCRCSignatureCheck */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK;
+
+/*******************************************
+            PDumpValCheckPreCommand
+ *******************************************/
+
+/* Bridge in structure for PDumpValCheckPreCommand */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND;
+
+/* Bridge out structure for PDumpValCheckPreCommand */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND;
+
+/*******************************************
+            PDumpValCheckPostCommand
+ *******************************************/
+
+/* Bridge in structure for PDumpValCheckPostCommand */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND_TAG
+{
+	IMG_UINT32 ui32PDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND;
+
+/* Bridge out structure for PDumpValCheckPostCommand */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND;
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Server-side dispatcher for PDumpTraceBuffer: casts the raw bridge buffers
+ * to the typed in/out structures and calls the KM implementation on the
+ * connection's device node. Returns 0 always; the bridge status travels in
+ * the OUT structure's eError field.
+ */
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+			     IMG_UINT8 * psPDumpTraceBufferIN_UI8,
+			     IMG_UINT8 * psPDumpTraceBufferOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *) IMG_OFFSET_ADDR(psPDumpTraceBufferIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *) IMG_OFFSET_ADDR(psPDumpTraceBufferOUT_UI8, 0);
+
+	psPDumpTraceBufferOUT->eError =
+	    PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevNode(psConnection),
+				     psPDumpTraceBufferIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+/* Server-side dispatcher for PDumpSignatureBuffer; same pattern as the trace
+ * buffer entry point: unpack flags, call KM, report status via OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psPDumpSignatureBufferIN_UI8,
+				 IMG_UINT8 * psPDumpSignatureBufferOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *) IMG_OFFSET_ADDR(psPDumpSignatureBufferIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *)
+	    IMG_OFFSET_ADDR(psPDumpSignatureBufferOUT_UI8, 0);
+
+	psPDumpSignatureBufferOUT->eError =
+	    PVRSRVPDumpSignatureBufferKM(psConnection, OSGetDevNode(psConnection),
+					 psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+#if defined(SUPPORT_VALIDATION)
+
+/* Server-side dispatcher for PDumpComputeCRCSignatureCheck. Compiled only in
+ * SUPPORT_VALIDATION builds; otherwise the dispatch table entry is defined
+ * to NULL below so the bridge rejects the command.
+ */
+static IMG_INT
+PVRSRVBridgePDumpComputeCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry,
+					  IMG_UINT8 * psPDumpComputeCRCSignatureCheckIN_UI8,
+					  IMG_UINT8 * psPDumpComputeCRCSignatureCheckOUT_UI8,
+					  CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPCOMPUTECRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *psPDumpComputeCRCSignatureCheckOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPCOMPUTECRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpComputeCRCSignatureCheckOUT_UI8, 0);
+
+	psPDumpComputeCRCSignatureCheckOUT->eError =
+	    PVRSRVPDumpComputeCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection),
+						  psPDumpComputeCRCSignatureCheckIN->
+						  ui32PDumpFlags);
+
+	return 0;
+}
+
+#else
+#define PVRSRVBridgePDumpComputeCRCSignatureCheck NULL
+#endif
+
+/* Server-side dispatcher for PDumpCRCSignatureCheck: unpack flags, call KM,
+ * report status via OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePDumpCRCSignatureCheck(IMG_UINT32 ui32DispatchTableEntry,
+				   IMG_UINT8 * psPDumpCRCSignatureCheckIN_UI8,
+				   IMG_UINT8 * psPDumpCRCSignatureCheckOUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPCRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *psPDumpCRCSignatureCheckOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPCRCSIGNATURECHECK *)
+	    IMG_OFFSET_ADDR(psPDumpCRCSignatureCheckOUT_UI8, 0);
+
+	psPDumpCRCSignatureCheckOUT->eError =
+	    PVRSRVPDumpCRCSignatureCheckKM(psConnection, OSGetDevNode(psConnection),
+					   psPDumpCRCSignatureCheckIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+/* Server-side dispatcher for PDumpValCheckPreCommand: unpack flags, call KM,
+ * report status via OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePDumpValCheckPreCommand(IMG_UINT32 ui32DispatchTableEntry,
+				    IMG_UINT8 * psPDumpValCheckPreCommandIN_UI8,
+				    IMG_UINT8 * psPDumpValCheckPreCommandOUT_UI8,
+				    CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND *psPDumpValCheckPreCommandIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPVALCHECKPRECOMMAND *)
+	    IMG_OFFSET_ADDR(psPDumpValCheckPreCommandIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND *psPDumpValCheckPreCommandOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPRECOMMAND *)
+	    IMG_OFFSET_ADDR(psPDumpValCheckPreCommandOUT_UI8, 0);
+
+	psPDumpValCheckPreCommandOUT->eError =
+	    PVRSRVPDumpValCheckPreCommandKM(psConnection, OSGetDevNode(psConnection),
+					    psPDumpValCheckPreCommandIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+/* Server-side dispatcher for PDumpValCheckPostCommand: unpack flags, call KM,
+ * report status via OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgePDumpValCheckPostCommand(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psPDumpValCheckPostCommandIN_UI8,
+				     IMG_UINT8 * psPDumpValCheckPostCommandOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND *psPDumpValCheckPostCommandIN =
+	    (PVRSRV_BRIDGE_IN_PDUMPVALCHECKPOSTCOMMAND *)
+	    IMG_OFFSET_ADDR(psPDumpValCheckPostCommandIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND *psPDumpValCheckPostCommandOUT =
+	    (PVRSRV_BRIDGE_OUT_PDUMPVALCHECKPOSTCOMMAND *)
+	    IMG_OFFSET_ADDR(psPDumpValCheckPostCommandOUT_UI8, 0);
+
+	psPDumpValCheckPostCommandOUT->eError =
+	    PVRSRVPDumpValCheckPostCommandKM(psConnection, OSGetDevNode(psConnection),
+					     psPDumpValCheckPostCommandIN->ui32PDumpFlags);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+void DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services.
+ *
+ * Note: in non-SUPPORT_VALIDATION builds PVRSRVBridgePDumpComputeCRCSignatureCheck
+ * is #defined to NULL above, so that slot is registered with a NULL handler.
+ * Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER,
+			      PVRSRVBridgePDumpTraceBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER,
+			      PVRSRVBridgePDumpSignatureBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK,
+			      PVRSRVBridgePDumpComputeCRCSignatureCheck, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK,
+			      PVRSRVBridgePDumpCRCSignatureCheck, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND,
+			      PVRSRVBridgePDumpValCheckPreCommand, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+			      PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND,
+			      PVRSRVBridgePDumpValCheckPostCommand, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxpdump functions with services.
+ * Mirrors InitRGXPDUMPBridge: one UnsetDispatchTableEntry per registered
+ * command ID.
+ */
+void DeinitRGXPDUMPBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPCOMPUTECRCSIGNATURECHECK);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPCRCSIGNATURECHECK);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPRECOMMAND);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP,
+				PVRSRV_BRIDGE_RGXPDUMP_PDUMPVALCHECKPOSTCOMMAND);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXRAY_BRIDGE_H
+#define COMMON_RGXRAY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+/* Per-module command indices for the RGXRAY bridge; CMD_LAST must track the
+ * highest offset below.
+ */
+#define PVRSRV_BRIDGE_RGXRAY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXRAY_CMD_LAST (PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2)
+
+/*******************************************
+            RGXCreateRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRayContext.
+ * pui8sStaticRayContextState points at a user-space blob of
+ * ui32StaticRayContextStateSize bytes that the server copies in. Structures
+ * are __packed so layout is identical on both sides of the bridge.
+ */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT_TAG
+{
+	IMG_UINT64 ui64RobustnessAddress;
+	IMG_HANDLE hPrivData;
+	IMG_BYTE *pui8sStaticRayContextState;
+	IMG_INT32 i32Priority;
+	IMG_UINT32 ui32ContextFlags;
+	IMG_UINT32 ui32MaxDeadlineMS;
+	IMG_UINT32 ui32StaticRayContextStateSize;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT;
+
+/* Bridge out structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT_TAG
+{
+	IMG_HANDLE hRayContext;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT;
+
+/*******************************************
+            RGXDestroyRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT_TAG
+{
+	IMG_HANDLE hRayContext;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT;
+
+/* Bridge out structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT;
+
+/*******************************************
+            RGXKickRDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickRDM.
+ * The pointer members reference user-space arrays of ui32ClientUpdateCount
+ * (sync fields) or ui32CmdSize (pui8DMCmd) elements. The doubled prefixes
+ * (ui64ui64..., ui32ui32...) come from the bridge generator's naming of the
+ * underlying parameters. */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKRDM_TAG
+{
+	IMG_UINT64 ui64ui64DeadlineInus;
+	IMG_HANDLE hRayContext;
+	IMG_UINT32 *pui32ClientUpdateOffset;
+	IMG_UINT32 *pui32ClientUpdateValue;
+	IMG_BYTE *pui8DMCmd;
+	IMG_CHAR *puiUpdateFenceName;
+	IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
+	PVRSRV_FENCE hCheckFenceFd;
+	PVRSRV_TIMELINE hUpdateTimeline;
+	IMG_UINT32 ui32ClientUpdateCount;
+	IMG_UINT32 ui32CmdSize;
+	IMG_UINT32 ui32ExtJobRef;
+	IMG_UINT32 ui32PDumpFlags;
+	IMG_UINT32 ui32ui32AccStructSizeInBytes;
+	IMG_UINT32 ui32ui32DispatchSize;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKRDM;
+
+/* Bridge out structure for RGXKickRDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKRDM_TAG
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_FENCE hUpdateFence;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKRDM;
+
+#endif /* COMMON_RGXRAY_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxray.h"
+
+#include "common_rgxray_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback registered with the handle allocated in
+ * PVRSRVBridgeRGXCreateRayContext: destroys the server-side ray context when
+ * the handle is released.
+ */
+static PVRSRV_ERROR _RGXCreateRayContextpsRayContextIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = PVRSRVRGXDestroyRayContextKM((RGX_SERVER_RAY_CONTEXT *) pvData);
+	return eError;
+}
+
+static_assert(RGXFWIF_STATIC_RAYCONTEXT_SIZE <= IMG_UINT32_MAX,
+	      "RGXFWIF_STATIC_RAYCONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Server-side dispatcher for RGXCreateRayContext.
+ * Steps: validate the static-state size, stage a kernel copy of the
+ * user-supplied static ray-context state (reusing the tail of the bridge
+ * input buffer when it is big enough, otherwise a temporary allocation),
+ * look up the priv-data handle, call PVRSRVRGXCreateRayContextKM, then
+ * allocate a handle for the new context with a release callback that
+ * destroys it. On any failure after creation the context is destroyed
+ * before returning. Returns 0 always; status travels in OUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry,
+				IMG_UINT8 * psRGXCreateRayContextIN_UI8,
+				IMG_UINT8 * psRGXCreateRayContextOUT_UI8,
+				CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *psRGXCreateRayContextIN =
+	    (PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *) IMG_OFFSET_ADDR(psRGXCreateRayContextIN_UI8,
+								     0);
+	PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *psRGXCreateRayContextOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *) IMG_OFFSET_ADDR(psRGXCreateRayContextOUT_UI8,
+								      0);
+
+	IMG_HANDLE hPrivData = psRGXCreateRayContextIN->hPrivData;
+	IMG_HANDLE hPrivDataInt = NULL;
+	IMG_BYTE *ui8sStaticRayContextStateInt = NULL;
+	RGX_SERVER_RAY_CONTEXT *psRayContextInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	/* Aggregate copy size is computed in 64 bits to avoid overflow before
+	 * the IMG_UINT32_MAX range check below. */
+	IMG_UINT32 ui32BufferSize = 0;
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psRGXCreateRayContextIN->ui32StaticRayContextStateSize *
+	     sizeof(IMG_BYTE)) + 0;
+
+	/* Reject state blobs larger than the firmware's static ray-context size. */
+	if (unlikely
+	    (psRGXCreateRayContextIN->ui32StaticRayContextStateSize >
+	     RGXFWIF_STATIC_RAYCONTEXT_SIZE))
+	{
+		psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto RGXCreateRayContext_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto RGXCreateRayContext_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRayContextIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto RGXCreateRayContext_exit;
+			}
+		}
+	}
+
+	if (psRGXCreateRayContextIN->ui32StaticRayContextStateSize != 0)
+	{
+		ui8sStaticRayContextStateInt =
+		    (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE);
+	}
+
+	/* Copy the data over */
+	if (psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, ui8sStaticRayContextStateInt,
+		     (const void __user *)psRGXCreateRayContextIN->pui8sStaticRayContextState,
+		     psRGXCreateRayContextIN->ui32StaticRayContextStateSize * sizeof(IMG_BYTE)) !=
+		    PVRSRV_OK)
+		{
+			psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto RGXCreateRayContext_exit;
+		}
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psRGXCreateRayContextOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&hPrivDataInt,
+				       hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+	if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateRayContext_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXCreateRayContextOUT->eError =
+	    PVRSRVRGXCreateRayContextKM(psConnection, OSGetDevNode(psConnection),
+					psRGXCreateRayContextIN->i32Priority,
+					hPrivDataInt,
+					psRGXCreateRayContextIN->ui32ContextFlags,
+					psRGXCreateRayContextIN->ui32StaticRayContextStateSize,
+					ui8sStaticRayContextStateInt,
+					psRGXCreateRayContextIN->ui64RobustnessAddress,
+					psRGXCreateRayContextIN->ui32MaxDeadlineMS,
+					&psRayContextInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK))
+	{
+		goto RGXCreateRayContext_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXCreateRayContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+								     &psRGXCreateRayContextOUT->
+								     hRayContext,
+								     (void *)psRayContextInt,
+								     PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+								     PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+								     (PFN_HANDLE_RELEASE) &
+								     _RGXCreateRayContextpsRayContextIntRelease);
+	if (unlikely(psRGXCreateRayContextOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXCreateRayContext_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateRayContext_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (hPrivDataInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* On failure, destroy the context created above (if any) since no
+	 * handle now owns it. */
+	if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+	{
+		if (psRayContextInt)
+		{
+			PVRSRVRGXDestroyRayContextKM(psRayContextInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psRGXCreateRayContextOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free the staging buffer if it was separately allocated (i.e.
+	 * not carved out of the bridge input buffer). */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Server-side dispatcher for RGXDestroyRayContext: destroys the ray-context
+ * handle under the handle lock (the context itself is torn down by the
+ * handle's release callback). KERNEL_CCB_FULL and RETRY results are excluded
+ * from the error log; all results still reach the caller via OUT->eError.
+ * Returns 0 always.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyRayContext(IMG_UINT32 ui32DispatchTableEntry,
+				 IMG_UINT8 * psRGXDestroyRayContextIN_UI8,
+				 IMG_UINT8 * psRGXDestroyRayContextOUT_UI8,
+				 CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextIN =
+	    (PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *) IMG_OFFSET_ADDR(psRGXDestroyRayContextIN_UI8,
+								      0);
+	PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *)
+	    IMG_OFFSET_ADDR(psRGXDestroyRayContextOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psRGXDestroyRayContextOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psRGXDestroyRayContextIN->hRayContext,
+					      PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+	if (unlikely((psRGXDestroyRayContextOUT->eError != PVRSRV_OK) &&
+		     (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psRGXDestroyRayContextOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXDestroyRayContext_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyRayContext_exit:
+
+	return 0;
+}
+
+/* Compile-time guards: the buffer-size arithmetic in PVRSRVBridgeRGXKickRDM
+ * below is done in 64 bits and then truncated to 32 bits after a range
+ * check, so each contributing bound must itself fit in an IMG_UINT32.
+ */
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Server-side bridge entry for RGXKickRDM (ray dispatch kick).
+ *
+ * Marshals the variable-length arrays supplied by the client (sync-prim
+ * block handles, update offsets/values, fence name, DM command bytes) out
+ * of user space, resolves the ray-context and sync-prim handles to kernel
+ * objects under the connection's handle lock, and forwards everything to
+ * PVRSRVRGXKickRDMKM. The IMG_INT return is always 0; the real status is
+ * reported to the caller through psRGXKickRDMOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXKickRDM(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXKickRDMIN_UI8,
+ IMG_UINT8 * psRGXKickRDMOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXKICKRDM *psRGXKickRDMIN =
+ (PVRSRV_BRIDGE_IN_RGXKICKRDM *) IMG_OFFSET_ADDR(psRGXKickRDMIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXKICKRDM *psRGXKickRDMOUT =
+ (PVRSRV_BRIDGE_OUT_RGXKICKRDM *) IMG_OFFSET_ADDR(psRGXKickRDMOUT_UI8, 0);
+
+ IMG_HANDLE hRayContext = psRGXKickRDMIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT *psRayContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *ui8DMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total scratch space needed for all copied-in arrays, accumulated in
+ * 64-bit so a hostile ui32ClientUpdateCount/ui32CmdSize cannot overflow
+ * 32-bit arithmetic; range-checked against IMG_UINT32_MAX below.
+ */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) + 0;
+
+ /* Reject client-supplied counts above the fixed bridge limits before
+ * any allocation or copy takes place.
+ */
+ if (unlikely(psRGXKickRDMIN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickRDM_exit;
+ }
+
+ if (unlikely(psRGXKickRDMIN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickRDM_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXKickRDM_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXKickRDMIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickRDMIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer tail too small: fall back to a heap allocation,
+ * freed at the end of the function (see bHaveEnoughSpace check).
+ */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickRDM_exit;
+ }
+ }
+ }
+
+ /* Carve the scratch buffer into sub-arrays; ui32NextOffset tracks the
+ * running offset and must equal ui32BufferSize once all carves are done
+ * (asserted at the exit path when PVRSRV_NEED_PVR_ASSERT is defined).
+ */
+ if (psRGXKickRDMIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0,
+ psRGXKickRDMIN->ui32ClientUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hClientUpdateUFOSyncPrimBlockInt2,
+ (const void __user *)psRGXKickRDMIN->phClientUpdateUFOSyncPrimBlock,
+ psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRDM_exit;
+ }
+ }
+ if (psRGXKickRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientUpdateOffsetInt,
+ (const void __user *)psRGXKickRDMIN->pui32ClientUpdateOffset,
+ psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRDM_exit;
+ }
+ }
+ if (psRGXKickRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientUpdateValueInt,
+ (const void __user *)psRGXKickRDMIN->pui32ClientUpdateValue,
+ psRGXKickRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRDM_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiUpdateFenceNameInt,
+ (const void __user *)psRGXKickRDMIN->puiUpdateFenceName,
+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRDM_exit;
+ }
+ /* Force NUL-termination: the client copy is not trusted to be a
+ * valid C string.
+ */
+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+ if (psRGXKickRDMIN->ui32CmdSize != 0)
+ {
+ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickRDMIN->pui8DMCmd,
+ psRGXKickRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRDM_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXKickRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXKickRDMOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickRDM_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickRDMIN->ui32ClientUpdateCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)
+ &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXKickRDMOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickRDM_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXKickRDMOUT->eError =
+ PVRSRVRGXKickRDMKM(psRayContextInt,
+ psRGXKickRDMIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickRDMIN->hCheckFenceFd,
+ psRGXKickRDMIN->hUpdateTimeline,
+ &psRGXKickRDMOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickRDMIN->ui32CmdSize,
+ ui8DMCmdInt,
+ psRGXKickRDMIN->ui32PDumpFlags,
+ psRGXKickRDMIN->ui32ExtJobRef,
+ psRGXKickRDMIN->ui32ui32AccStructSizeInBytes,
+ psRGXKickRDMIN->ui32ui32DispatchSize,
+ psRGXKickRDMIN->ui64ui64DeadlineInus);
+
+RGXKickRDM_exit:
+
+ /* Cleanup path: drop every handle reference taken above (success or
+ * failure), then free the scratch buffer if it was heap-allocated.
+ */
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext, PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+
+ if (hClientUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickRDMIN->ui32ClientUpdateCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psClientUpdateUFOSyncPrimBlockInt
+ && psClientUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXKickRDMOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXRAYBridge(void);
+void DeinitRGXRAYBridge(void);
+
+/*
+ * Register all RGXRAY functions with services
+ */
+/* Installs the three RGXRAY bridge entry points into the global dispatch
+ * table. The final NULL argument is presumably an optional per-entry lock
+ * or hint — NOTE(review): confirm against the SetDispatchTableEntry
+ * prototype in srvcore.h. Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitRGXRAYBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT,
+ PVRSRVBridgeRGXCreateRayContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT,
+ PVRSRVBridgeRGXDestroyRayContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM,
+ PVRSRVBridgeRGXKickRDM, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxray functions with services
+ */
+/* Mirror of InitRGXRAYBridge: removes the same three dispatch-table
+ * entries. No return value; un-registration is assumed infallible here.
+ */
+void DeinitRGXRAYBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRDM);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxregconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxregconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXREGCONFIG_BRIDGE_H
+#define COMMON_RGXREGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+/* Per-bridge command ordinals. CMD_LAST must track the highest offset used
+ * below; the dispatcher sizes its table from it.
+ */
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4)
+
+/* All bridge structures below are __packed: they cross the user/kernel
+ * boundary as raw bytes, so layout must be identical on both sides.
+ */
+
+/*******************************************
+ RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+ IMG_UINT8 ui8RegPowerIsland;
+} __packed PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+/*******************************************
+ RGXAddRegconfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+ IMG_UINT64 ui64RegMask;
+ IMG_UINT64 ui64RegValue;
+ IMG_UINT32 ui32RegAddr;
+} __packed PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegconfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+/*******************************************
+ RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+ /* No payload: the member only keeps the struct non-empty, which C
+ * requires for a well-defined sizeof across compilers.
+ */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+/*******************************************
+ RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+ /* No payload; placeholder member as above. */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+/*******************************************
+ RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+ /* No payload; placeholder member as above. */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+#endif /* COMMON_RGXREGCONFIG_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxregconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxregconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+#include "common_rgxregconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge entry for RGXSetRegConfigType: forwards the single power-island
+ * byte from the IN buffer to the KM implementation. The IMG_INT return is
+ * always 0; the real status is reported via the OUT structure's eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8,
+ IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN =
+ (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT =
+ (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8,
+ 0);
+
+ psRGXSetRegConfigTypeOUT->eError =
+ PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevNode(psConnection),
+ psRGXSetRegConfigTypeIN->ui8RegPowerIsland);
+
+ return 0;
+}
+
+/* Bridge entry for RGXAddRegconfig: passes one register address/value/mask
+ * triple to the KM layer. NOTE(review): the KM call takes (addr, value,
+ * mask) while the IN struct declares mask before value — the order here
+ * looks deliberate but should be confirmed against the
+ * PVRSRVRGXAddRegConfigKM prototype. Always returns 0; status in eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXAddRegconfigIN_UI8,
+ IMG_UINT8 * psRGXAddRegconfigOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN =
+ (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT =
+ (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0);
+
+ psRGXAddRegconfigOUT->eError =
+ PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection),
+ psRGXAddRegconfigIN->ui32RegAddr,
+ psRGXAddRegconfigIN->ui64RegValue,
+ psRGXAddRegconfigIN->ui64RegMask);
+
+ return 0;
+}
+
+/* Bridge entry for RGXClearRegConfig: no input payload (IN struct is a
+ * placeholder, explicitly marked unreferenced). Always returns 0; status
+ * is reported via the OUT structure's eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXClearRegConfigIN_UI8,
+ IMG_UINT8 * psRGXClearRegConfigOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN =
+ (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+ psRGXClearRegConfigOUT->eError =
+ PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+/* Bridge entry for RGXEnableRegConfig: no input payload. Always returns 0;
+ * status is reported via the OUT structure's eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXEnableRegConfigIN_UI8,
+ IMG_UINT8 * psRGXEnableRegConfigOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN =
+ (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT =
+ (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8,
+ 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+ psRGXEnableRegConfigOUT->eError =
+ PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+/* Bridge entry for RGXDisableRegConfig: no input payload. Always returns 0;
+ * status is reported via the OUT structure's eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXDisableRegConfigIN_UI8,
+ IMG_UINT8 * psRGXDisableRegConfigOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN =
+ (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8,
+ 0);
+
+ PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+ psRGXDisableRegConfigOUT->eError =
+ PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
+void DeinitRGXREGCONFIGBridge(void);
+
+/*
+ * Register all RGXREGCONFIG functions with services
+ */
+/* Installs the five RGXREGCONFIG bridge entry points into the global
+ * dispatch table. Compiled out entirely when EXCLUDE_RGXREGCONFIG_BRIDGE
+ * is defined (see the #else stubs at the end of this section). Always
+ * returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitRGXREGCONFIGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE,
+ PVRSRVBridgeRGXSetRegConfigType, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG,
+ PVRSRVBridgeRGXAddRegconfig, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG,
+ PVRSRVBridgeRGXClearRegConfig, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG,
+ PVRSRVBridgeRGXEnableRegConfig, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG,
+ PVRSRVBridgeRGXDisableRegConfig, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxregconfig functions with services
+ */
+/* Mirror of InitRGXREGCONFIGBridge: removes the same five entries. */
+void DeinitRGXREGCONFIGBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG,
+ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG);
+
+}
+#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined,
+ * do not populate the dispatch table with its functions
+ */
+/* No-op stubs so callers need no #ifdef at the call site. */
+#define InitRGXREGCONFIGBridge() \
+	PVRSRV_OK
+
+#define DeinitRGXREGCONFIGBridge()
+
+#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_fwif_shared.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+/* Per-bridge command ordinals for the TA/3D (geometry + fragment) bridge.
+ * CMD_LAST must track the highest offset used below.
+ */
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13)
+
+/* All structures below are __packed: they cross the user/kernel boundary
+ * as raw bytes, so layout must match on both sides. Pointer members hold
+ * user-space addresses — the server side presumably copies the pointed-to
+ * arrays in with OSCopyFromUser; confirm in the generated server file.
+ */
+
+/*******************************************
+ RGXCreateHWRTDataSet
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG
+{
+ IMG_DEV_VIRTADDR sVHeapTableDevVAddr;
+ IMG_UINT64 ui64PPPMultiSampleCtl;
+ IMG_DEV_VIRTADDR *psPMDataAddr;
+ IMG_DEV_VIRTADDR *psPMSecureDataAddr;
+ IMG_DEV_VIRTADDR *psTailPtrsDevVAddr;
+ IMG_HANDLE *phKmHwRTDataSet;
+ IMG_HANDLE *phapsFreeLists;
+ IMG_UINT32 ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ISPMergeScaleY;
+ IMG_UINT32 ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ISPMergeUpperY;
+ IMG_UINT32 ui32PPPScreen;
+ IMG_UINT32 ui32RgnStride;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TPCStride;
+ IMG_UINT16 ui16MaxRTs;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET;
+
+/* Bridge out structure for RGXCreateHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG
+{
+ IMG_HANDLE *phKmHwRTDataSet;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET;
+
+/*******************************************
+ RGXDestroyHWRTDataSet
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG
+{
+ IMG_HANDLE hKmHwRTDataSet;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET;
+
+/* Bridge out structure for RGXDestroyHWRTDataSet */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET;
+
+/*******************************************
+ RGXCreateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hReservation;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+/*******************************************
+ RGXDestroyZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferMemDesc;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+/*******************************************
+ RGXPopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+} __packed PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+/*******************************************
+ RGXUnpopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+} __packed PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+/*******************************************
+ RGXCreateFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+ IMG_DEV_VIRTADDR spsFreeListBaseDevVAddr;
+ IMG_DEV_VIRTADDR spsFreeListStateDevVAddr;
+ IMG_DEVMEM_OFFSET_T uiPMROffset;
+ IMG_DEVMEM_OFFSET_T uiPMRStateOffset;
+ IMG_HANDLE hMemCtxPrivData;
+ IMG_HANDLE hsFreeListPMR;
+ IMG_HANDLE hsFreeListStatePMR;
+ IMG_HANDLE hsGlobalFreeList;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_UINT32 ui32GrowParamThreshold;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32MaxFLPages;
+ IMG_BOOL bbFreeListCheck;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+/*******************************************
+ RGXDestroyFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+/*******************************************
+ RGXCreateRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_UINT64 ui64RobustnessAddress;
+ IMG_HANDLE hPrivData;
+ IMG_BYTE *pui8FrameworkCmd;
+ IMG_BYTE *pui8StaticRenderContextState;
+ IMG_INT32 i32Priority;
+ IMG_UINT32 ui32ContextFlags;
+ IMG_UINT32 ui32FrameworkCmdSize;
+ IMG_UINT32 ui32Max3DDeadlineMS;
+ IMG_UINT32 ui32MaxTADeadlineMS;
+ IMG_UINT32 ui32PackedCCBSizeU8888;
+ IMG_UINT32 ui32StaticRenderContextStateSize;
+} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_HANDLE hRenderContext;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+/*******************************************
+ RGXDestroyRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+/*******************************************
+ RGXSetRenderContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority.
+ * NOTE(review): packed ABI structure — field order must not change. */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hRenderContext;
+ IMG_INT32 i32Priority;
+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+/*******************************************
+ RGXRenderContextStalled
+ *******************************************/
+
+/* Bridge in structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG
+{
+ IMG_HANDLE hRenderContext;
+} __packed PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED;
+
+/* Bridge out structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED;
+
+/*******************************************
+ RGXKickTA3D2
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D2.
+ * Pointer fields carry user-space addresses for the fence/update arrays and
+ * command buffers; the element counts are given by the corresponding
+ * ui32*Count / ui32*Size fields below. The server presumably copies these in
+ * with OSCopyFromUser as the other handlers in this file do — verify against
+ * the kick handler. */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG
+{
+ IMG_UINT64 ui64Deadline;
+ IMG_HANDLE hKMHWRTDataSet;
+ IMG_HANDLE hMSAAScratchBuffer;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+ IMG_HANDLE hRenderContext;
+ IMG_HANDLE hZSBuffer;
+ IMG_UINT32 *pui32Client3DUpdateSyncOffset;
+ IMG_UINT32 *pui32Client3DUpdateValue;
+ IMG_UINT32 *pui32ClientTAFenceSyncOffset;
+ IMG_UINT32 *pui32ClientTAFenceValue;
+ IMG_UINT32 *pui32ClientTAUpdateSyncOffset;
+ IMG_UINT32 *pui32ClientTAUpdateValue;
+ IMG_UINT32 *pui32SyncPMRFlags;
+ IMG_BYTE *pui83DCmd;
+ IMG_BYTE *pui83DPRCmd;
+ IMG_BYTE *pui8TACmd;
+ IMG_CHAR *puiUpdateFenceName;
+ IMG_CHAR *puiUpdateFenceName3D;
+ IMG_HANDLE *phClient3DUpdateSyncPrimBlock;
+ IMG_HANDLE *phClientTAFenceSyncPrimBlock;
+ IMG_HANDLE *phClientTAUpdateSyncPrimBlock;
+ IMG_HANDLE *phSyncPMRs;
+ PVRSRV_FENCE hCheckFence;
+ PVRSRV_FENCE hCheckFence3D;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ PVRSRV_TIMELINE hUpdateTimeline3D;
+ IMG_UINT32 ui323DCmdSize;
+ IMG_UINT32 ui323DPRCmdSize;
+ IMG_UINT32 ui32Client3DUpdateCount;
+ IMG_UINT32 ui32ClientTAFenceCount;
+ IMG_UINT32 ui32ClientTAUpdateCount;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32NumberOfDrawCalls;
+ IMG_UINT32 ui32NumberOfIndices;
+ IMG_UINT32 ui32NumberOfMRTs;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32PRFenceUFOSyncOffset;
+ IMG_UINT32 ui32PRFenceValue;
+ IMG_UINT32 ui32RenderTargetSize;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 ui32TACmdSize;
+ IMG_BOOL bbAbort;
+ IMG_BOOL bbKick3D;
+ IMG_BOOL bbKickPR;
+ IMG_BOOL bbKickTA;
+} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2;
+
+/* Bridge out structure for RGXKickTA3D2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_FENCE hUpdateFence3D;
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKTA3D2;
+
+/*******************************************
+ RGXSetRenderContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Input;
+ IMG_HANDLE hRenderContext;
+ IMG_UINT32 ui32Property;
+} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetRenderContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Output;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback registered for RGX_KM_HW_RT_DATASET handles:
+ * tears down the data set owned by the handle being released. */
+static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData)
+{
+ return RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData);
+}
+
+/* Compile-time guards: the array-size constants used in the bridge buffer
+ * arithmetic below must fit the IMG_UINT32 offset/size variables.
+ * The generator emits one assert per array argument, which produced three
+ * identical RGXMKIF_NUM_RTDATAS asserts; the duplicates are dropped here —
+ * one assert per distinct constant is sufficient. */
+static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX,
+ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX,
+ "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX");
+static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX,
+ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX");
+
+/*
+ * PVRSRVBridgeRGXCreateHWRTDataSet: server entry point for the
+ * RGXCreateHWRTDataSet bridge call. Copies the caller's array arguments in,
+ * looks up the freelist handles, calls RGXCreateHWRTDataSet, then allocates
+ * handles for the created data sets and copies them back to user space.
+ *
+ * FIX(review): the error-cleanup loop at the end previously tested and
+ * destroyed hKmHwRTDataSetInt2[i] (the IMG_HANDLE array). That array is
+ * never zero-initialised — only psKmHwRTDataSetInt is memset below — so on
+ * any error before handle creation the loop read uninitialised memory, and
+ * the values it holds are handle-base handles, not the RGX_KM_HW_RT_DATASET
+ * pointers RGXDestroyHWRTDataSet expects. The cleanup now uses
+ * psKmHwRTDataSetInt, which is zeroed up front and filled in by
+ * RGXCreateHWRTDataSet with the objects that actually need destroying.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8,
+ IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN =
+ (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *)
+ IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0);
+
+ IMG_DEV_VIRTADDR *sPMDataAddrInt = NULL;
+ IMG_DEV_VIRTADDR *sPMSecureDataAddrInt = NULL;
+ RGX_FREELIST **psapsFreeListsInt = NULL;
+ IMG_HANDLE *hapsFreeListsInt2 = NULL;
+ IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL;
+ RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL;
+ IMG_HANDLE *hKmHwRTDataSetInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ /* Total size of all in/out array arguments, computed in 64 bits so the
+  * sum cannot wrap before the range check below. */
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) +
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) +
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) +
+ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0;
+
+ psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXCreateHWRTDataSet_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+ }
+
+ {
+ sPMDataAddrInt =
+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR);
+ }
+
+ /* Copy the data over */
+ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, sPMDataAddrInt,
+ (const void __user *)psRGXCreateHWRTDataSetIN->psPMDataAddr,
+ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+
+ {
+ sPMSecureDataAddrInt =
+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR);
+ }
+
+ /* Copy the data over */
+ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, sPMSecureDataAddrInt,
+ (const void __user *)psRGXCreateHWRTDataSetIN->psPMSecureDataAddr,
+ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+
+ {
+ psapsFreeListsInt =
+ (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ /* Zeroed so the exit path can tell which entries were looked up. */
+ OSCachedMemSet(psapsFreeListsInt, 0,
+ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *));
+ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *);
+ hapsFreeListsInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hapsFreeListsInt2,
+ (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists,
+ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+
+ {
+ sTailPtrsDevVAddrInt =
+ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR);
+ }
+
+ /* Copy the data over */
+ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, sTailPtrsDevVAddrInt,
+ (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr,
+ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK)
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
+ {
+ psKmHwRTDataSetInt =
+ (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ /* Zeroed so the error path below only destroys data sets that were
+  * actually created. hKmHwRTDataSetInt2 is NOT zeroed — it is fully
+  * written by the handle-allocation loop before it is ever read. */
+ OSCachedMemSet(psKmHwRTDataSetInt, 0,
+ RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *));
+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *);
+ hKmHwRTDataSetInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE);
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXCreateHWRTDataSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psapsFreeListsInt[i],
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE);
+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXCreateHWRTDataSetOUT->eError =
+ RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection),
+ psRGXCreateHWRTDataSetIN->sVHeapTableDevVAddr,
+ sPMDataAddrInt,
+ sPMSecureDataAddrInt,
+ psapsFreeListsInt,
+ psRGXCreateHWRTDataSetIN->ui32PPPScreen,
+ psRGXCreateHWRTDataSetIN->ui64PPPMultiSampleCtl,
+ psRGXCreateHWRTDataSetIN->ui32TPCStride,
+ sTailPtrsDevVAddrInt,
+ psRGXCreateHWRTDataSetIN->ui32TPCSize,
+ psRGXCreateHWRTDataSetIN->ui32TEScreen,
+ psRGXCreateHWRTDataSetIN->ui32TEAA,
+ psRGXCreateHWRTDataSetIN->ui32TEMTILE1,
+ psRGXCreateHWRTDataSetIN->ui32TEMTILE2,
+ psRGXCreateHWRTDataSetIN->ui32RgnStride,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX,
+ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY,
+ psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+ {
+ goto RGXCreateHWRTDataSet_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+ if (hKmHwRTDataSetInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++)
+ {
+
+ psRGXCreateHWRTDataSetOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &hKmHwRTDataSetInt2[i],
+ (void *)psKmHwRTDataSetInt[i],
+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease);
+ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateHWRTDataSet_exit;
+ }
+
+ }
+ }
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet,
+ hKmHwRTDataSetInt2,
+ (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK))
+ {
+ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTDataSet_exit;
+ }
+ }
+
+RGXCreateHWRTDataSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ if (hapsFreeListsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psapsFreeListsInt && psapsFreeListsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)
+ {
+ {
+ IMG_UINT32 i;
+
+ /* FIX: destroy via the zero-initialised pointer array, not the
+  * (possibly uninitialised) handle array — see function header. */
+ if (psKmHwRTDataSetInt)
+ {
+ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++)
+ {
+ if (psKmHwRTDataSetInt[i])
+ {
+ RGXDestroyHWRTDataSet(psKmHwRTDataSetInt[i]);
+ }
+ }
+ }
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * PVRSRVBridgeRGXDestroyHWRTDataSet: server entry point for the
+ * RGXDestroyHWRTDataSet bridge call. Destroys the caller's data-set handle;
+ * the release callback registered at creation time then tears down the
+ * underlying object.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8,
+ IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN =
+ (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *)
+ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *)
+ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXDestroyHWRTDataSetOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyHWRTDataSetIN->
+ hKmHwRTDataSet,
+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET);
+ /* KERNEL_CCB_FULL and RETRY are returned to the caller without logging —
+  * presumably transient conditions the caller retries; verify with the
+  * client-side bridge. */
+ if (unlikely
+ ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK)
+ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXDestroyHWRTDataSet_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyHWRTDataSet_exit:
+
+ return 0;
+}
+
+/* Handle-release callback for RGX_FWIF_ZSBUFFER handles: destroys the
+ * ZS buffer owned by the handle being released. */
+static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData)
+{
+ return RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData);
+}
+
+/*
+ * PVRSRVBridgeRGXCreateZSBuffer: server entry point for the RGXCreateZSBuffer
+ * bridge call. Looks up the reservation and PMR handles, creates the ZS
+ * buffer, and returns a new handle for it in the OUT structure. On any
+ * failure, looked-up handles are unreferenced and a created-but-unhandled
+ * buffer is destroyed directly.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCreateZSBufferIN_UI8,
+ IMG_UINT8 * psRGXCreateZSBufferOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN =
+ (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0);
+
+ IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+ DEVMEMINT_RESERVATION *psReservationInt = NULL;
+ IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+ PMR *psPMRInt = NULL;
+ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE);
+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRInt,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateZSBuffer_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXCreateZSBufferOUT->eError =
+ RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection),
+ psReservationInt,
+ psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* The release callback makes the handle base the owner of the buffer
+  * from this point on. */
+ psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateZSBufferOUT->
+ hsZSBufferKM,
+ (void *)pssZSBufferKMInt,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXCreateZSBufferpssZSBufferKMIntRelease);
+ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* On error, destroy the buffer directly: it was only created if
+  * RGXCreateZSBufferKM succeeded, and handle allocation failing means no
+  * release callback will ever run for it. */
+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssZSBufferKMInt)
+ {
+ RGXDestroyZSBufferKM(pssZSBufferKMInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * PVRSRVBridgeRGXDestroyZSBuffer: server entry point for the
+ * RGXDestroyZSBuffer bridge call. Destroys the caller's ZS-buffer handle;
+ * the release callback registered at creation tears down the buffer.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXDestroyZSBufferIN_UI8,
+ IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN =
+ (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8,
+ 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXDestroyZSBufferOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyZSBufferIN->
+ hsZSBufferMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ /* KERNEL_CCB_FULL and RETRY are returned without logging — presumably
+  * transient, retried by the caller; verify with the client-side bridge. */
+ if (unlikely
+ ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK)
+ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXDestroyZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyZSBuffer_exit:
+
+ return 0;
+}
+
+/* Handle-release callback for RGX_POPULATION handles: unpopulates the
+ * ZS buffer population owned by the handle being released. */
+static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData)
+{
+ return RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData);
+}
+
+/*
+ * PVRSRVBridgeRGXPopulateZSBuffer: server entry point for the
+ * RGXPopulateZSBuffer bridge call. Looks up the ZS-buffer handle, populates
+ * it, and returns a population handle the caller later passes to
+ * RGXUnpopulateZSBuffer.
+ */
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXPopulateZSBufferIN_UI8,
+ IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN =
+ (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT =
+ (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8,
+ 0);
+
+ IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL;
+ RGX_POPULATION *pssPopulationInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXPopulateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&pssZSBufferKMInt,
+ hsZSBufferKM,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXPopulateZSBuffer_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXPopulateZSBufferOUT->eError =
+ RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* The release callback makes the handle base the owner of the
+  * population from this point on. */
+ psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXPopulateZSBufferOUT->
+ hsPopulation,
+ (void *)pssPopulationInt,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXPopulateZSBufferpssPopulationIntRelease);
+ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXPopulateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (pssZSBufferKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* On error, unpopulate directly: handle allocation failing means no
+  * release callback will ever run for this population. */
+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssPopulationInt)
+ {
+ RGXUnpopulateZSBufferKM(pssPopulationInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * PVRSRVBridgeRGXUnpopulateZSBuffer: server entry point for the
+ * RGXUnpopulateZSBuffer bridge call. Destroys the population handle; the
+ * release callback registered at population time performs the unpopulate.
+ */
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8,
+ IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN =
+ (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *)
+ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT =
+ (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *)
+ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXUnpopulateZSBufferOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+ /* KERNEL_CCB_FULL and RETRY are returned without logging — presumably
+  * transient, retried by the caller; verify with the client-side bridge. */
+ if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) &&
+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXUnpopulateZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXUnpopulateZSBuffer_exit:
+
+ return 0;
+}
+
+/* Handle-release callback for RGX_FREELIST cleanup cookies: destroys the
+ * freelist owned by the handle being released. */
+static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData)
+{
+ return RGXDestroyFreeList((RGX_FREELIST *) pvData);
+}
+
+/* Bridge dispatcher for RGXCreateFreeList.
+ *
+ * Looks up the caller's memory-context private data, the optional global
+ * freelist and two PMR handles, calls RGXCreateFreeList, and on success wraps
+ * the returned RGX_FREELIST in a new "cleanup cookie" handle whose release
+ * callback (_RGXCreateFreeListpsCleanupCookieIntRelease) destroys the
+ * freelist.  All looked-up handles are unreferenced on every exit path, and
+ * the freelist itself is destroyed directly if handle creation fails after a
+ * successful server call.  Always returns 0; the PVRSRV status is delivered
+ * to user space via psRGXCreateFreeListOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCreateFreeListIN_UI8,
+ IMG_UINT8 * psRGXCreateFreeListOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN =
+ (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0);
+
+ IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData;
+ IMG_HANDLE hMemCtxPrivDataInt = NULL;
+ IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+ RGX_FREELIST *pssGlobalFreeListInt = NULL;
+ IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+ PMR *pssFreeListPMRInt = NULL;
+ IMG_HANDLE hsFreeListStatePMR = psRGXCreateFreeListIN->hsFreeListStatePMR;
+ PMR *pssFreeListStatePMRInt = NULL;
+ RGX_FREELIST *psCleanupCookieInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hMemCtxPrivDataInt,
+ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* The global freelist handle is optional: only look it up when supplied. */
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&pssGlobalFreeListInt,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE);
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateFreeList_exit;
+ }
+ }
+
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&pssFreeListPMRInt,
+ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&pssFreeListStatePMRInt,
+ hsFreeListStatePMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateFreeList_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Server call is made with the handle lock dropped. */
+ psRGXCreateFreeListOUT->eError =
+ RGXCreateFreeList(psConnection, OSGetDevNode(psConnection),
+ hMemCtxPrivDataInt,
+ psRGXCreateFreeListIN->ui32MaxFLPages,
+ psRGXCreateFreeListIN->ui32InitFLPages,
+ psRGXCreateFreeListIN->ui32GrowFLPages,
+ psRGXCreateFreeListIN->ui32GrowParamThreshold,
+ pssGlobalFreeListInt,
+ psRGXCreateFreeListIN->bbFreeListCheck,
+ psRGXCreateFreeListIN->spsFreeListBaseDevVAddr,
+ psRGXCreateFreeListIN->spsFreeListStateDevVAddr,
+ pssFreeListPMRInt,
+ psRGXCreateFreeListIN->uiPMROffset,
+ pssFreeListStatePMRInt,
+ psRGXCreateFreeListIN->uiPMRStateOffset, &psCleanupCookieInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Hand the new freelist back to user space as a cleanup-cookie handle;
+ * the release callback owns its destruction from here on (on success). */
+ psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateFreeListOUT->
+ hCleanupCookie,
+ (void *)psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXCreateFreeListpsCleanupCookieIntRelease);
+ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hMemCtxPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (pssGlobalFreeListInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+
+ /* Unreference the previously looked up handle */
+ if (pssFreeListPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (pssFreeListStatePMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeListStatePMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* If we failed after creating the freelist but before the handle took
+ * ownership, destroy it here to avoid leaking it. */
+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyFreeList(psCleanupCookieInt);
+ }
+ }
+
+ return 0;
+}
+
+/* Bridge dispatcher for RGXDestroyFreeList.
+ *
+ * Destroys the cleanup-cookie handle created by PVRSRVBridgeRGXCreateFreeList;
+ * the actual freelist teardown happens in the handle's release callback.
+ * PVRSRV_ERROR_KERNEL_CCB_FULL and PVRSRV_ERROR_RETRY are passed back to the
+ * caller without logging so user space can retry the destroy.  Always returns
+ * 0; the status is delivered via psRGXDestroyFreeListOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXDestroyFreeListIN_UI8,
+ IMG_UINT8 * psRGXDestroyFreeListOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN =
+ (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8,
+ 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXDestroyFreeListOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ /* CCB-full and retry results are expected and not logged as errors. */
+ if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXDestroyFreeList_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyFreeList_exit:
+
+ return 0;
+}
+
+/* Handle-release callback for the render-context handle: forwards to
+ * PVRSRVRGXDestroyRenderContextKM and returns its status unchanged.
+ */
+static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData);
+ return eError;
+}
+
+/* Compile-time guards: the per-array size limits used below must fit the
+ * 32-bit arithmetic performed on them. */
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
+
+/* Bridge dispatcher for RGXCreateRenderContext.
+ *
+ * Validates the user-supplied framework-command and static-render-context
+ * array sizes against their firmware limits (overflow-safe: the total is
+ * summed in 64 bits before the 32-bit cast), copies both arrays from user
+ * space — reusing spare space in the bridge input buffer when it fits,
+ * otherwise allocating — looks up the private-data handle, calls
+ * PVRSRVRGXCreateRenderContextKM and wraps the new context in a handle whose
+ * release callback destroys it.  Always returns 0; the status goes back via
+ * psRGXCreateRenderContextOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXCreateRenderContextIN_UI8,
+ IMG_UINT8 * psRGXCreateRenderContextOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN =
+ (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0);
+
+ IMG_BYTE *ui8FrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ IMG_BYTE *ui8StaticRenderContextStateInt = NULL;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Total staging-buffer size computed in 64 bits to avoid overflow before
+ * the IMG_UINT32_MAX range check below. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize *
+ sizeof(IMG_BYTE)) + 0;
+
+ if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXCreateRenderContext_exit;
+ }
+
+ if (unlikely
+ (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize >
+ RGXFWIF_STATIC_RENDERCONTEXT_SIZE))
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXCreateRenderContext_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXCreateRenderContext_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Not enough slack in the input message: allocate a buffer,
+ * freed at the end of the function. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0)
+ {
+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8FrameworkCmdInt,
+ (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd,
+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
+ PVRSRV_OK)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0)
+ {
+ ui8StaticRenderContextStateInt =
+ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8StaticRenderContextStateInt,
+ (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState,
+ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize *
+ sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hPrivDataInt,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);
+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateRenderContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection),
+ psRGXCreateRenderContextIN->i32Priority,
+ psRGXCreateRenderContextIN->ui32FrameworkCmdSize,
+ ui8FrameworkCmdInt,
+ hPrivDataInt,
+ psRGXCreateRenderContextIN->
+ ui32StaticRenderContextStateSize,
+ ui8StaticRenderContextStateInt,
+ psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888,
+ psRGXCreateRenderContextIN->ui32ContextFlags,
+ psRGXCreateRenderContextIN->ui64RobustnessAddress,
+ psRGXCreateRenderContextIN->ui32MaxTADeadlineMS,
+ psRGXCreateRenderContextIN->ui32Max3DDeadlineMS,
+ &psRenderContextInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+ {
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* On success the handle's release callback owns destruction of the
+ * render context. */
+ psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateRenderContextOUT->
+ hRenderContext,
+ (void *)psRenderContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXCreateRenderContextpsRenderContextIntRelease);
+ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXCreateRenderContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* If we failed after creating the context but before the handle took
+ * ownership, destroy it here to avoid leaking it. */
+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ if (psRenderContextInt)
+ {
+ PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when we allocated; the in-message slack needs no free. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge dispatcher for RGXDestroyRenderContext.
+ *
+ * Destroys the render-context handle; the context teardown itself happens in
+ * the handle's release callback.  PVRSRV_ERROR_KERNEL_CCB_FULL and
+ * PVRSRV_ERROR_RETRY are returned without logging so user space can retry.
+ * Always returns 0; status via psRGXDestroyRenderContextOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXDestroyRenderContextIN_UI8,
+ IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN =
+ (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXDestroyRenderContextOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRenderContextIN->
+ hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ /* CCB-full and retry results are expected and not logged as errors. */
+ if (unlikely
+ ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK)
+ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXDestroyRenderContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXDestroyRenderContext_exit:
+
+ return 0;
+}
+
+/* Bridge dispatcher for RGXSetRenderContextPriority.
+ *
+ * Looks up the render-context handle, calls
+ * PVRSRVRGXSetRenderContextPriorityKM with the requested priority, then
+ * unreferences the looked-up handle on all paths.  Always returns 0; status
+ * via psRGXSetRenderContextPriorityOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8,
+ IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN =
+ (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT =
+ (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0);
+
+ IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXSetRenderContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+ psRenderContextInt,
+ psRGXSetRenderContextPriorityIN->i32Priority);
+
+RGXSetRenderContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge dispatcher for RGXRenderContextStalled.
+ *
+ * Looks up the render-context handle, forwards to RGXRenderContextStalledKM,
+ * and unreferences the handle on all paths.  Always returns 0; status via
+ * psRGXRenderContextStalledOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXRenderContextStalledIN_UI8,
+ IMG_UINT8 * psRGXRenderContextStalledOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN =
+ (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *)
+ IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT =
+ (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *)
+ IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0);
+
+ IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXRenderContextStalledOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXRenderContextStalled_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt);
+
+RGXRenderContextStalled_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXKickTA3D2IN_UI8,
+ IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN =
+ (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT =
+ (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0);
+
+ IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock;
+ SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_CHAR *uiUpdateFenceName3DInt = NULL;
+ IMG_BYTE *ui8TACmdInt = NULL;
+ IMG_BYTE *ui83DPRCmdInt = NULL;
+ IMG_BYTE *ui83DCmdInt = NULL;
+ IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet;
+ RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL;
+ IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer;
+ RGX_ZSBUFFER_DATA *psZSBufferInt = NULL;
+ IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer;
+ RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
+ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXKickTA3D2_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ }
+
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+ {
+ psClientTAFenceSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0,
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAFenceSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hClientTAFenceSyncPrimBlockInt2,
+ (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock,
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceSyncOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientTAFenceSyncOffsetInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset,
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientTAFenceValueInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue,
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+ {
+ psClientTAUpdateSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0,
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAUpdateSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hClientTAUpdateSyncPrimBlockInt2,
+ (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock,
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateSyncOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientTAUpdateSyncOffsetInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset,
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32ClientTAUpdateValueInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue,
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+ {
+ psClient3DUpdateSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0,
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClient3DUpdateSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hClient3DUpdateSyncPrimBlockInt2,
+ (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock,
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateSyncOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32Client3DUpdateSyncOffsetInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset,
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32Client3DUpdateValueInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue,
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiUpdateFenceNameInt,
+ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName,
+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+
+ {
+ uiUpdateFenceName3DInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiUpdateFenceName3DInt,
+ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D,
+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+ if (psRGXKickTA3D2IN->ui32TACmdSize != 0)
+ {
+ ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd,
+ psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0)
+ {
+ ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd,
+ psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui323DCmdSize != 0)
+ {
+ ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd,
+ psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32SyncPMRFlagsInt,
+ (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags,
+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psSyncPMRsInt, 0,
+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *));
+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs,
+ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D2_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psClientTAFenceSyncPrimBlockInt[i],
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)
+ &psClientTAUpdateSyncPrimBlockInt[i],
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)
+ &psClient3DUpdateSyncPrimBlockInt[i],
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ }
+
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPRFenceUFOSyncPrimBlockInt,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+
+ if (psRGXKickTA3D2IN->hKMHWRTDataSet)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psKMHWRTDataSetInt,
+ hKMHWRTDataSet,
+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+
+ if (psRGXKickTA3D2IN->hZSBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psZSBufferInt,
+ hZSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+
+ if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psMSAAScratchBufferInt,
+ hMSAAScratchBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXKickTA3D2_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXKickTA3D2OUT->eError =
+ PVRSRVRGXKickTA3DKM(psRenderContextInt,
+ psRGXKickTA3D2IN->ui32ClientTAFenceCount,
+ psClientTAFenceSyncPrimBlockInt,
+ ui32ClientTAFenceSyncOffsetInt,
+ ui32ClientTAFenceValueInt,
+ psRGXKickTA3D2IN->ui32ClientTAUpdateCount,
+ psClientTAUpdateSyncPrimBlockInt,
+ ui32ClientTAUpdateSyncOffsetInt,
+ ui32ClientTAUpdateValueInt,
+ psRGXKickTA3D2IN->ui32Client3DUpdateCount,
+ psClient3DUpdateSyncPrimBlockInt,
+ ui32Client3DUpdateSyncOffsetInt,
+ ui32Client3DUpdateValueInt,
+ psPRFenceUFOSyncPrimBlockInt,
+ psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset,
+ psRGXKickTA3D2IN->ui32PRFenceValue,
+ psRGXKickTA3D2IN->hCheckFence,
+ psRGXKickTA3D2IN->hUpdateTimeline,
+ &psRGXKickTA3D2OUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickTA3D2IN->hCheckFence3D,
+ psRGXKickTA3D2IN->hUpdateTimeline3D,
+ &psRGXKickTA3D2OUT->hUpdateFence3D,
+ uiUpdateFenceName3DInt,
+ psRGXKickTA3D2IN->ui32TACmdSize,
+ ui8TACmdInt,
+ psRGXKickTA3D2IN->ui323DPRCmdSize,
+ ui83DPRCmdInt,
+ psRGXKickTA3D2IN->ui323DCmdSize,
+ ui83DCmdInt,
+ psRGXKickTA3D2IN->ui32ExtJobRef,
+ psRGXKickTA3D2IN->bbKickTA,
+ psRGXKickTA3D2IN->bbKickPR,
+ psRGXKickTA3D2IN->bbKick3D,
+ psRGXKickTA3D2IN->bbAbort,
+ psRGXKickTA3D2IN->ui32PDumpFlags,
+ psKMHWRTDataSetInt,
+ psZSBufferInt,
+ psMSAAScratchBufferInt,
+ psRGXKickTA3D2IN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt,
+ psRGXKickTA3D2IN->ui32RenderTargetSize,
+ psRGXKickTA3D2IN->ui32NumberOfDrawCalls,
+ psRGXKickTA3D2IN->ui32NumberOfIndices,
+ psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline);
+
+RGXKickTA3D2_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+
+ if (hClientTAFenceSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psClientTAFenceSyncPrimBlockInt && psClientTAFenceSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hClientTAUpdateSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psClientTAUpdateSyncPrimBlockInt && psClientTAUpdateSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hClient3DUpdateSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psClient3DUpdateSyncPrimBlockInt && psClient3DUpdateSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psPRFenceUFOSyncPrimBlockInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+
+ if (psRGXKickTA3D2IN->hKMHWRTDataSet)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psKMHWRTDataSetInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hKMHWRTDataSet,
+ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET);
+ }
+ }
+
+ if (psRGXKickTA3D2IN->hZSBuffer)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psZSBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hZSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+ if (psRGXKickTA3D2IN->hMSAAScratchBuffer)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psMSAAScratchBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hMSAAScratchBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXKickTA3D2OUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry for RGXSetRenderContextProperty: looks up the server-side
+ * render context from the caller-supplied handle, forwards the
+ * (property, input) pair to the KM implementation and returns the 64-bit
+ * output value through the OUT structure.  Errors are reported via
+ * psRGXSetRenderContextPropertyOUT->eError; the dispatcher return value is
+ * always 0.  (Generated bridge code - keep in sync with the generator.)
+ */
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+					IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8,
+					IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8,
+					CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN =
+	    (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *)
+	    IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *)
+	    IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0);
+
+	IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext;
+	RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle.  The IMG_TRUE argument takes a
+	 * reference, released in the _exit path below. */
+	psRGXSetRenderContextPropertyOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&psRenderContextInt,
+				       hRenderContext,
+				       PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE);
+	if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto RGXSetRenderContextProperty_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psRGXSetRenderContextPropertyOUT->eError =
+	    PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt,
+						psRGXSetRenderContextPropertyIN->ui32Property,
+						psRGXSetRenderContextPropertyIN->ui64Input,
+						&psRGXSetRenderContextPropertyOUT->ui64Output);
+
+RGXSetRenderContextProperty_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psRenderContextInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hRenderContext,
+					    PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+void DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+	/* One entry per RGXTA3D bridge command ID.  The final NULL selects the
+	 * default per-entry bridge lock/flags (NOTE(review): confirm against
+	 * this DDK version's SetDispatchTableEntry signature). */
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET,
+			      PVRSRVBridgeRGXCreateHWRTDataSet, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET,
+			      PVRSRVBridgeRGXDestroyHWRTDataSet, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER,
+			      PVRSRVBridgeRGXCreateZSBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER,
+			      PVRSRVBridgeRGXDestroyZSBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER,
+			      PVRSRVBridgeRGXPopulateZSBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER,
+			      PVRSRVBridgeRGXUnpopulateZSBuffer, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST,
+			      PVRSRVBridgeRGXCreateFreeList, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST,
+			      PVRSRVBridgeRGXDestroyFreeList, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT,
+			      PVRSRVBridgeRGXCreateRenderContext, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT,
+			      PVRSRVBridgeRGXDestroyRenderContext, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY,
+			      PVRSRVBridgeRGXSetRenderContextPriority, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED,
+			      PVRSRVBridgeRGXRenderContextStalled, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2,
+			      PVRSRVBridgeRGXKickTA3D2, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+			      PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY,
+			      PVRSRVBridgeRGXSetRenderContextProperty, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxta3d functions with services
+ */
+void DeinitRGXTA3DBridge(void)
+{
+	/* Mirror of InitRGXTA3DBridge: remove every RGXTA3D entry from the
+	 * dispatch table so no further calls can be routed to this module. */
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D,
+				PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxtimerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtimerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTIMERQUERY_BRIDGE_H
+#define COMMON_RGXTIMERQUERY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+/* Dispatch-table command indices for the RGXTIMERQUERY bridge: consecutive
+ * offsets from CMD_FIRST.  CMD_LAST must equal the highest offset so the
+ * dispatcher can validate the command range. */
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2)
+
+/* The IN/OUT structures below cross the user/kernel boundary; they are
+ * declared __packed so both sides agree on the layout (NOTE(review):
+ * __packed presumably maps to a compiler packed attribute via img_defs.h -
+ * confirm).  Do not reorder or resize fields without regenerating both
+ * sides of the bridge. */
+
+/*******************************************
+            RGXBeginTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+	IMG_UINT32 ui32QueryId;	/* caller-chosen query slot identifier */
+} __packed PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+/*******************************************
+            RGXEndTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+	/* placeholder keeps the struct non-empty (an empty struct is not
+	 * valid standard C); the call takes no real input parameters */
+	IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+/*******************************************
+            RGXQueryTimer
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+	IMG_UINT32 ui32QueryId;	/* query slot to read back */
+} __packed PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+	IMG_UINT64 ui64EndTime;
+	IMG_UINT64 ui64StartTime;
+	PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+#endif /* COMMON_RGXTIMERQUERY_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxtimerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtimerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+#include "common_rgxtimerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge entry for RGXBeginTimerQuery: unpacks the IN structure and calls the
+ * KM implementation for the caller's device node.  Status is returned via the
+ * OUT structure's eError field; the dispatcher return value is always 0. */
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+			       IMG_UINT8 * psRGXBeginTimerQueryIN_UI8,
+			       IMG_UINT8 * psRGXBeginTimerQueryOUT_UI8,
+			       CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN =
+	    (PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryOUT_UI8,
+								     0);
+
+	psRGXBeginTimerQueryOUT->eError =
+	    PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevNode(psConnection),
+				       psRGXBeginTimerQueryIN->ui32QueryId);
+
+	return 0;
+}
+
+/* Bridge entry for RGXEndTimerQuery.  The IN structure is a placeholder only
+ * (no real input parameters), hence the PVR_UNREFERENCED_PARAMETER below. */
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+			     IMG_UINT8 * psRGXEndTimerQueryIN_UI8,
+			     IMG_UINT8 * psRGXEndTimerQueryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN =
+	    (PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryOUT_UI8, 0);
+
+	PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
+
+	psRGXEndTimerQueryOUT->eError =
+	    PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevNode(psConnection));
+
+	return 0;
+}
+
+/* Bridge entry for RGXQueryTimer: reads back the start/end timestamps for the
+ * given query slot into the OUT structure via the KM implementation. */
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+			  IMG_UINT8 * psRGXQueryTimerIN_UI8,
+			  IMG_UINT8 * psRGXQueryTimerOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN =
+	    (PVRSRV_BRIDGE_IN_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT =
+	    (PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerOUT_UI8, 0);
+
+	psRGXQueryTimerOUT->eError =
+	    PVRSRVRGXQueryTimerKM(psConnection, OSGetDevNode(psConnection),
+				  psRGXQueryTimerIN->ui32QueryId,
+				  &psRGXQueryTimerOUT->ui64StartTime,
+				  &psRGXQueryTimerOUT->ui64EndTime);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void);
+void DeinitRGXTIMERQUERYBridge(void);
+
+/*
+ * Register all RGXTIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitRGXTIMERQUERYBridge(void)
+{
+	/* One entry per RGXTIMERQUERY bridge command ID; the final NULL selects
+	 * the default per-entry bridge lock/flags (NOTE(review): confirm
+	 * against this DDK version's SetDispatchTableEntry signature). */
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY,
+			      PVRSRVBridgeRGXBeginTimerQuery, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY,
+			      PVRSRVBridgeRGXEndTimerQuery, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+			      NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxtimerquery functions with services
+ */
+void DeinitRGXTIMERQUERYBridge(void)
+{
+	/* Mirror of InitRGXTIMERQUERYBridge: remove every RGXTIMERQUERY entry
+	 * from the dispatch table. */
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+				PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+				PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
+				PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0	/* Base offset of the rgxtq2 bridge's command IDs within its dispatch table */
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7)	/* Must stay in sync with the highest command offset above */
+
+/*******************************************
+ RGXTDMCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_UINT64 ui64RobustnessAddress;	/* Passed through to PVRSRVRGXTDMCreateTransferContextKM */
+ IMG_HANDLE hPrivData;	/* Handle of type PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, looked up server-side */
+ IMG_BYTE *pui8FrameworkCmd;	/* User-mode pointer; server copies ui32FrameworkCmdSize bytes from it */
+ IMG_INT32 i32Priority;	/* Requested context priority (signed) */
+ IMG_UINT32 ui32ContextFlags;
+ IMG_UINT32 ui32FrameworkCmdSize;	/* Byte count for pui8FrameworkCmd; server rejects > RGXFWIF_RF_CMD_SIZE */
+ IMG_UINT32 ui32PackedCCBSizeU88;	/* Packed client CCB size; presumably two log2 sizes in 8.8 form - TODO confirm */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;	/* packed: struct crosses the user/kernel ABI boundary */
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;	/* Handle allocated for the new TDM transfer context */
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+/*******************************************
+ RGXTDMDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;	/* Handle previously returned by RGXTDMCreateTransferContext */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;	/* May be PVRSRV_ERROR_RETRY / KERNEL_CCB_FULL: caller should retry */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/*******************************************
+ RGXTDMSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hTransferContext;	/* TDM transfer context whose priority is changed */
+ IMG_INT32 i32Priority;	/* New priority (signed) */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/*******************************************
+ RGXTDMNotifyWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ IMG_HANDLE hTransferContext;	/* Context whose CCB write offset was updated */
+ IMG_UINT32 ui32PDumpFlags;	/* PDump capture flags forwarded to the KM call */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/*******************************************
+ RGXTDMSubmitTransfer2
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG
+{
+ IMG_UINT64 ui64DeadlineInus;	/* Presumably a deadline in microseconds - TODO confirm against KM */
+ IMG_HANDLE hTransferContext;	/* Target TDM transfer context */
+ IMG_UINT32 *pui32SyncPMRFlags;	/* User array, ui32SyncPMRCount entries */
+ IMG_UINT32 *pui32UpdateSyncOffset;	/* User array, ui32ClientUpdateCount entries */
+ IMG_UINT32 *pui32UpdateValue;	/* User array, ui32ClientUpdateCount entries */
+ IMG_UINT8 *pui8FWCommand;	/* User buffer, ui32CommandSize bytes of FW command data */
+ IMG_CHAR *puiUpdateFenceName;	/* User buffer, PVRSRV_SYNC_NAME_LENGTH chars; server NUL-terminates */
+ IMG_HANDLE *phSyncPMRs;	/* User array of PMR handles, ui32SyncPMRCount entries */
+ IMG_HANDLE *phUpdateUFOSyncPrimBlock;	/* User array of sync-prim-block handles, ui32ClientUpdateCount entries */
+ PVRSRV_FENCE hCheckFenceFD;	/* Fence to wait on before execution */
+ PVRSRV_TIMELINE hUpdateTimeline;	/* Timeline the update fence is created on */
+ IMG_UINT32 ui32Characteristic1;
+ IMG_UINT32 ui32Characteristic2;
+ IMG_UINT32 ui32ClientUpdateCount;	/* Server rejects > PVRSRV_MAX_SYNCS */
+ IMG_UINT32 ui32CommandSize;	/* Server rejects > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE */
+ IMG_UINT32 ui32ExternalJobReference;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32SyncPMRCount;	/* Server rejects > PVRSRV_MAX_SYNCS */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2;	/* packed: struct crosses the user/kernel ABI boundary */
+
+/* Bridge out structure for RGXTDMSubmitTransfer2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG
+{
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+ PVRSRV_FENCE hUpdateFence;	/* Fence signalled when the submitted transfer completes */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2;
+
+/*******************************************
+ RGXTDMGetSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;	/* No inputs; placeholder keeps the struct non-empty for the ABI */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY;
+
+/* Bridge out structure for RGXTDMGetSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG
+{
+ IMG_HANDLE hCLIPMRMem;	/* PMR handle for the CLI shared memory */
+ IMG_HANDLE hUSCPMRMem;	/* PMR handle for the USC shared memory */
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY;
+
+/*******************************************
+ RGXTDMReleaseSharedMemory
+ *******************************************/
+
+/* Bridge in structure for RGXTDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG
+{
+ IMG_HANDLE hPMRMem;	/* PMR handle previously returned by RGXTDMGetSharedMemory */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY;
+
+/* Bridge out structure for RGXTDMReleaseSharedMemory */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG
+{
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY;
+
+/*******************************************
+ RGXTDMSetTransferContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Input;	/* Property-specific input value */
+ IMG_HANDLE hTransferContext;	/* Context the property is applied to */
+ IMG_UINT32 ui32Property;	/* Property selector; semantics defined by the KM implementation */
+} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXTDMSetTransferContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Output;	/* Property-specific output value */
+ PVRSRV_ERROR eError;	/* PVRSRV_OK on success */
+} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void *pvData)	/* Handle-release callback: destroys the TDM context when its handle is freed */
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) pvData);	/* pvData is the RGX_SERVER_TQ_TDM_CONTEXT stored at handle-alloc time */
+ return eError;
+}
+
+static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");	/* Bound below is compared against a 32-bit size */
+
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,	/* Bridge entry: unpack user args, validate, call the KM create, publish the handle */
+ IMG_UINT8 * psRGXTDMCreateTransferContextIN_UI8,
+ IMG_UINT8 * psRGXTDMCreateTransferContextOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0);
+
+ IMG_BYTE *ui8FrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;	/* Snapshot the handle before any user copy overwrites the IN buffer */
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize *
+ sizeof(IMG_BYTE)) + 0;	/* 64-bit sum to detect overflow before the 32-bit narrowing below */
+
+ if (unlikely(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))	/* Reject oversized untrusted array length up front */
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)	/* Overflow guard for the 32-bit cast that follows */
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;	/* Also selects the cleanup path: heap buffer freed only when this is false */
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer =
+ (IMG_BYTE *) (void *)psRGXTDMCreateTransferContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize != 0)
+ {
+ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);	/* Carve the framework-cmd slot out of the staging buffer */
+ ui32NextOffset +=
+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8FrameworkCmdInt,
+ (const void __user *)psRGXTDMCreateTransferContextIN->pui8FrameworkCmd,
+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) !=
+ PVRSRV_OK)
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hPrivDataInt,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE);	/* IMG_TRUE: takes a reference, dropped in the exit path */
+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection),
+ psRGXTDMCreateTransferContextIN->i32Priority,
+ psRGXTDMCreateTransferContextIN->
+ ui32FrameworkCmdSize, ui8FrameworkCmdInt,
+ hPrivDataInt,
+ psRGXTDMCreateTransferContextIN->
+ ui32PackedCCBSizeU88,
+ psRGXTDMCreateTransferContextIN->ui32ContextFlags,
+ psRGXTDMCreateTransferContextIN->
+ ui64RobustnessAddress, &psTransferContextInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+ {
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXTDMCreateTransferContextOUT->hTransferContext,
+ (void *)psTransferContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXTDMCreateTransferContextpsTransferContextIntRelease);	/* Handle now owns the context; release callback destroys it */
+ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMCreateTransferContext_exit:	/* Shared cleanup for every success/failure path above */
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ if (psTransferContextInt)
+ {
+ PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt);	/* Undo the KM create when handle allocation (or anything later) failed */
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXTDMCreateTransferContextOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)	/* Free only if the staging buffer was heap-allocated, not carved from the IN buffer */
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,	/* Bridge entry: destroys the TDM transfer context handle (and, via its release callback, the context) */
+ IMG_UINT8 * psRGXTDMDestroyTransferContextIN_UI8,
+ IMG_UINT8 * psRGXTDMDestroyTransferContextOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *)
+ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0);
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXTDMDestroyTransferContextOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXTDMDestroyTransferContextIN->
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ if (unlikely
+ ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK)
+ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY)))	/* CCB-full/retry are expected transients: returned to the caller without logging */
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__,
+ PVRSRVGetErrorString(psRGXTDMDestroyTransferContextOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMDestroyTransferContext_exit:
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,	/* Bridge entry: looks up the context handle and forwards the priority change to KM */
+ IMG_UINT8 * psRGXTDMSetTransferContextPriorityIN_UI8,
+ IMG_UINT8 * psRGXTDMSetTransferContextPriorityOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *)
+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0);
+
+ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);	/* IMG_TRUE: takes a reference, dropped in the exit path */
+ if (unlikely(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection),
+ psTransferContextInt,
+ psRGXTDMSetTransferContextPriorityIN->
+ i32Priority);
+
+RGXTDMSetTransferContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,	/* Bridge entry: looks up the context handle and forwards the write-offset notification to KM */
+ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateIN_UI8,
+ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *)
+ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *)
+ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0);
+
+ IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);	/* IMG_TRUE: takes a reference, dropped in the exit path */
+ if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt,
+ psRGXTDMNotifyWriteOffsetUpdateIN->
+ ui32PDumpFlags);
+
+RGXTDMNotifyWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");	/* Bounds below are compared against 32-bit counts from the IN struct */
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX,
+ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX");
+static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX,
+ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX");	/* Duplicate of the first assert; harmless, emitted by the bridge generator */
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8,
+ IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN =
+ (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *)
+ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *)
+ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0);
+
+ IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer2IN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_UINT8 *ui8FWCommandInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) +
+ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0;
+
+ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+
+ if (unlikely
+ (psRGXTDMSubmitTransfer2IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE))
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+
+ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS))
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+ {
+ psUpdateUFOSyncPrimBlockInt =
+ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0,
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *));
+ ui32NextOffset +=
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount *
+ sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hUpdateUFOSyncPrimBlockInt2 =
+ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hUpdateUFOSyncPrimBlockInt2,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->phUpdateUFOSyncPrimBlock,
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) !=
+ PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateSyncOffsetInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32UpdateSyncOffsetInt,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateSyncOffset,
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) !=
+ PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateValueInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32UpdateValueInt,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateValue,
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) !=
+ PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt =
+ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiUpdateFenceNameInt,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->puiUpdateFenceName,
+ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0)
+ {
+ ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui8FWCommandInt,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui8FWCommand,
+ psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32SyncPMRFlagsInt,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32SyncPMRFlags,
+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ OSCachedMemSet(psSyncPMRsInt, 0,
+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *));
+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, hSyncPMRsInt2,
+ (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs,
+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK)
+ {
+ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransfer2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransfer2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psUpdateUFOSyncPrimBlockInt[i],
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransfer2OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMSubmitTransfer2_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXTDMSubmitTransfer2OUT->eError =
+ PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt,
+ psRGXTDMSubmitTransfer2IN->ui32PDumpFlags,
+ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ psRGXTDMSubmitTransfer2IN->hCheckFenceFD,
+ psRGXTDMSubmitTransfer2IN->hUpdateTimeline,
+ &psRGXTDMSubmitTransfer2OUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXTDMSubmitTransfer2IN->ui32CommandSize,
+ ui8FWCommandInt,
+ psRGXTDMSubmitTransfer2IN->ui32ExternalJobReference,
+ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt,
+ psRGXTDMSubmitTransfer2IN->ui32Characteristic1,
+ psRGXTDMSubmitTransfer2IN->ui32Characteristic2,
+ psRGXTDMSubmitTransfer2IN->ui64DeadlineInus);
+
+RGXTDMSubmitTransfer2_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+
+ if (hUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psUpdateUFOSyncPrimBlockInt && psUpdateUFOSyncPrimBlockInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if (psSyncPMRsInt && psSyncPMRsInt[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRGXTDMSubmitTransfer2OUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Handle-release callback for the CLI shared-memory export handle.
+ * The handle framework passes the stored PMR pointer in pvData; forward it
+ * to the KM release so the server-side reference is dropped. */
+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData);
+ return eError;
+}
+
+/* Handle-release callback for the USC shared-memory export handle.
+ * pvData is the PMR pointer stored at handle creation; release it via the KM
+ * entry point. */
+static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData);
+ return eError;
+}
+
+/*
+ * Bridge entry for RGXTDMGetSharedMemory.
+ * Obtains the TDM CLI and USC shared-memory PMRs from
+ * PVRSRVRGXTDMGetSharedMemoryKM and exports each one to the caller as a
+ * per-connection PMR_LOCAL_EXPORT handle whose release callback drops the
+ * PMR reference. Requires the FASTRENDER_DM hardware feature.
+ * Always returns 0; the PVRSRV_ERROR status travels in the OUT packet.
+ */
+static IMG_INT
+PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8,
+ IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *)
+ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *)
+ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0);
+
+ PMR *psCLIPMRMemInt = NULL;
+ PMR *psUSCPMRMemInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMGetSharedMemory_exit;
+ }
+ }
+
+ PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN);
+
+ psRGXTDMGetSharedMemoryOUT->eError =
+ PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection),
+ &psCLIPMRMemInt, &psUSCPMRMemInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+ {
+ goto RGXTDMGetSharedMemory_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXTDMGetSharedMemoryOUT->
+ hCLIPMRMem,
+ (void *)psCLIPMRMemInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease);
+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMGetSharedMemory_exit;
+ }
+
+ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXTDMGetSharedMemoryOUT->
+ hUSCPMRMem,
+ (void *)psUSCPMRMemInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease);
+ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMGetSharedMemory_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMGetSharedMemory_exit:
+
+ /* On any failure, drop the PMR references obtained above directly.
+ * NOTE(review): if the second handle alloc failed, hCLIPMRMem already
+ * exists with a release callback that also calls the KM release — confirm
+ * with the generator/handle framework that this cannot double-release. */
+ if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)
+ {
+ if (psCLIPMRMemInt)
+ {
+ PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt);
+ }
+ if (psUSCPMRMemInt)
+ {
+ PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge entry for RGXTDMReleaseSharedMemory.
+ * Destroys the caller's PMR_LOCAL_EXPORT handle (hPMRMem); the handle's
+ * release callback performs the actual PMR release. RETRY and
+ * KERNEL_CCB_FULL are treated as expected transient results: they are
+ * returned to the caller without the error log. Requires FASTRENDER_DM.
+ * Always returns 0; status travels in the OUT packet.
+ */
+static IMG_INT
+PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXTDMReleaseSharedMemoryIN_UI8,
+ IMG_UINT8 * psRGXTDMReleaseSharedMemoryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *)
+ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *)
+ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0);
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMReleaseSharedMemory_exit;
+ }
+ }
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRGXTDMReleaseSharedMemoryOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXTDMReleaseSharedMemoryIN->hPMRMem,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+ if (unlikely((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) &&
+ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMReleaseSharedMemory_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RGXTDMReleaseSharedMemory_exit:
+
+ return 0;
+}
+
+/*
+ * Bridge entry for RGXTDMSetTransferContextProperty.
+ * Looks up the TDM transfer-context handle, forwards the (property, input)
+ * pair to PVRSRVRGXTDMSetTransferContextPropertyKM, and returns the 64-bit
+ * output value in the OUT packet. The context reference taken by the lookup
+ * is always dropped on the exit path. Requires FASTRENDER_DM.
+ * Always returns 0; status travels in the OUT packet.
+ */
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRGXTDMSetTransferContextPropertyIN_UI8,
+ IMG_UINT8 * psRGXTDMSetTransferContextPropertyOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyIN =
+ (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *)
+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyOUT =
+ (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *)
+ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0);
+
+ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPropertyIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
+ RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSetTransferContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSetTransferContextProperty_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psRGXTDMSetTransferContextPropertyOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE);
+ if (unlikely(psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RGXTDMSetTransferContextProperty_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRGXTDMSetTransferContextPropertyOUT->eError =
+ PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt,
+ psRGXTDMSetTransferContextPropertyIN->
+ ui32Property,
+ psRGXTDMSetTransferContextPropertyIN->
+ ui64Input,
+ &psRGXTDMSetTransferContextPropertyOUT->
+ ui64Output);
+
+RGXTDMSetTransferContextProperty_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+void DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+/* Register every RGXTQ2 bridge function in the services dispatch table.
+ * Entry ordinals mirror the PVRSRV_BRIDGE_RGXTQ2_* command IDs; each entry
+ * must have a matching Unset in DeinitRGXTQ2Bridge. */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT,
+ PVRSRVBridgeRGXTDMCreateTransferContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT,
+ PVRSRVBridgeRGXTDMDestroyTransferContext, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY,
+ PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE,
+ PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2,
+ PVRSRVBridgeRGXTDMSubmitTransfer2, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY,
+ PVRSRVBridgeRGXTDMGetSharedMemory, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY,
+ PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY,
+ PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all rgxtq2 functions with services
+ */
+/* Remove every RGXTQ2 dispatch-table entry registered by InitRGXTQ2Bridge.
+ * Must stay in one-to-one correspondence with the Set calls above. */
+void DeinitRGXTQ2Bridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
+ PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+/* Record an RI (resource-information) entry for the given PMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle);
+
+/* Record an RI MEMDESC entry (a suballocation/import within a PMR);
+ * returns the new RI handle in *phRIHandle. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR * puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle);
+
+/* Record a per-process RI list entry; returns the new RI handle. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR * puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE * phRIHandle);
+
+/* Update the device virtual address stored in an existing RI MEMDESC entry. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr);
+
+/* Delete a previously created RI MEMDESC entry. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle);
+
+/* Dump the RI list associated with one PMR. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle);
+
+/* Dump every RI entry. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+/* Dump the RI entries belonging to one process. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid);
+
+/* Record an RI entry for a PMR, attributing it to an explicit owner PID. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner);
+
+#endif /* CLIENT_RI_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for ri
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+/* Direct (in-kernel) bridge: cast the PMR handle and call the RI server
+ * directly. hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError = RIWritePMREntryKM(psPMRHandleInt);
+
+ return eError;
+}
+
+/* Direct bridge: create an RI MEMDESC entry for (offset, size) within the
+ * given PMR and hand the resulting RI handle back through *phRIHandle.
+ * hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR * puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRHandleInt;
+ RI_HANDLE psRIHandleInt = NULL;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWriteMEMDESCEntryKM(psPMRHandleInt,
+ ui32TextBSize,
+ puiTextB,
+ ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt);
+
+ /* *phRIHandle is written even on failure (it will be NULL then). */
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+/* Direct bridge: create a per-process RI list entry. Unlike its siblings,
+ * this call reinterprets hBridge as the PVRSRV_DEVICE_NODE pointer and
+ * passes a NULL connection to the KM function. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR * puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE * phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ eError =
+ RIWriteProcListEntryKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt);
+
+ /* *phRIHandle is written even on failure (it will be NULL then). */
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+/* Direct bridge: update the device virtual address recorded in an RI
+ * MEMDESC entry. hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr);
+
+ return eError;
+}
+
+/* Direct bridge: delete an RI MEMDESC entry previously created through
+ * BridgeRIWriteMEMDESCEntry. hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError = RIDeleteMEMDESCEntryKM(psRIHandleInt);
+
+ return eError;
+}
+
+/* Direct bridge: dump the RI list for one PMR. hBridge is unused in the
+ * direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError = RIDumpListKM(psPMRHandleInt);
+
+ return eError;
+}
+
+/* Direct bridge: dump every RI entry. hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError = RIDumpAllKM();
+ return eError;
+}
+
+/* Direct bridge: dump the RI entries for one process. hBridge is unused in
+ * the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError = RIDumpProcessKM(ui32Pid);
+
+ return eError;
+}
+
+/* Direct bridge: record an RI entry for a PMR attributed to ui32Owner.
+ * hBridge is unused in the direct path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner);
+
+ return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+/* Bridge function ordinals for the RI module, expressed as offsets from
+ * CMD_FIRST. Keep CMD_LAST equal to the highest ordinal when adding calls. */
+#define PVRSRV_BRIDGE_RI_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+
+/* IN/OUT parameter packets for each RI bridge call. Every structure is
+ * __packed; fields appear ordered largest-first (generated layout) —
+ * presumably to keep natural alignment without padding; do not reorder by
+ * hand. */
+
+/*******************************************
+ RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+/*******************************************
+ RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_HANDLE hPMRHandle;
+ const IMG_CHAR *puiTextB;
+ IMG_UINT32 ui32TextBSize;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsSuballoc;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+/*******************************************
+ RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_UINT64 ui64DevVAddr;
+ IMG_UINT64 ui64Size;
+ const IMG_CHAR *puiTextB;
+ IMG_UINT32 ui32TextBSize;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+/*******************************************
+ RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+ IMG_DEV_VIRTADDR sAddr;
+ IMG_HANDLE hRIHandle;
+} __packed PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+/*******************************************
+ RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+} __packed PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+/*******************************************
+ RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+/*******************************************
+ RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+ /* Placeholder so the empty IN packet has non-zero size. */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+/*******************************************
+ RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+ IMG_PID ui32Pid;
+} __packed PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+/*******************************************
+ RIWritePMREntryWithOwner
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+ IMG_HANDLE hPMRHandle;
+ IMG_PID ui32Owner;
+} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER;
+
+/* Bridge out structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER;
+
+#endif /* COMMON_RI_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge entry point for RIWritePMREntry.
+ * Looks up the PMR from the client-supplied handle, records it in the
+ * Resource Information (RI) database via RIWritePMREntryKM(), then drops
+ * the lookup reference.  Always returns 0; the per-call status is
+ * reported through psRIWritePMREntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIWritePMREntryIN_UI8,
+ IMG_UINT8 * psRIWritePMREntryOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN =
+ (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT =
+ (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0);
+
+ IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+ PMR *psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle (IMG_TRUE takes a reference). */
+ psRIWritePMREntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRHandleInt,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIWritePMREntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt);
+
+RIWritePMREntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Handle-release callback for RI handles created by RIWriteMEMDESCEntry:
+ * destroys the RI MEMDESC entry when its handle is freed.
+ */
+static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData);
+ return eError;
+}
+
+/* The annotation-size bound used below must fit in the 32-bit bridge field. */
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for RIWriteMEMDESCEntry.
+ *
+ * Copies the user-supplied annotation string (ui32TextBSize bytes at
+ * puiTextB) into kernel memory, looks up the PMR handle, and creates a
+ * MEMDESC entry in the RI database via RIWriteMEMDESCEntryKM().  On
+ * success a new RI handle is allocated for the caller; its release
+ * callback (_RIWriteMEMDESCEntrypsRIHandleIntRelease) tears the entry
+ * down.  Always returns 0; the call status is reported through
+ * psRIWriteMEMDESCEntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8,
+ IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN =
+ (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT =
+ (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8,
+ 0);
+
+ IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+ PMR *psPMRHandleInt = NULL;
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Accumulate in 64 bits so the size sum cannot wrap before the range check. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psRIWriteMEMDESCEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN))
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ /* Reuse the tail of the bridge input buffer; no allocation needed. */
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextBInt, (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB,
+ psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation string. */
+ ((IMG_CHAR *)
+ uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle (IMG_TRUE takes a reference). */
+ psRIWriteMEMDESCEntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRHandleInt,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRIWriteMEMDESCEntryOUT->eError =
+ RIWriteMEMDESCEntryKM(psPMRHandleInt,
+ psRIWriteMEMDESCEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteMEMDESCEntryIN->ui64Offset,
+ psRIWriteMEMDESCEntryIN->ui64Size,
+ psRIWriteMEMDESCEntryIN->bIsImport,
+ psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+ {
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRIWriteMEMDESCEntryOUT->
+ hRIHandle,
+ (void *)psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RIWriteMEMDESCEntrypsRIHandleIntRelease);
+ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RIWriteMEMDESCEntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* On failure, destroy the RI entry if handle allocation did not take ownership. */
+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRIWriteMEMDESCEntryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the marshalling buffer was heap-allocated above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Handle-release callback for RI handles created by RIWriteProcListEntry:
+ * destroys the RI entry when its handle is freed.
+ */
+static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData);
+ return eError;
+}
+
+/* The annotation-size bound used below must fit in the 32-bit bridge field. */
+static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
+ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
+
+/* Bridge entry point for RIWriteProcListEntry.
+ *
+ * Copies the user-supplied annotation string into kernel memory and adds
+ * a per-process list entry to the RI database via RIWriteProcListEntryKM()
+ * (no PMR handle is involved, unlike RIWriteMEMDESCEntry).  On success a
+ * new RI handle is allocated for the caller; its release callback tears
+ * the entry down.  Always returns 0; the call status is reported through
+ * psRIWriteProcListEntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIWriteProcListEntryIN_UI8,
+ IMG_UINT8 * psRIWriteProcListEntryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN =
+ (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT =
+ (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *)
+ IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0);
+
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Accumulate in 64 bits so the size sum cannot wrap before the range check. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psRIWriteProcListEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN))
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto RIWriteProcListEntry_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto RIWriteProcListEntry_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ /* Reuse the tail of the bridge input buffer; no allocation needed. */
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteProcListEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteProcListEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTextBInt, (const void __user *)psRIWriteProcListEntryIN->puiTextB,
+ psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteProcListEntry_exit;
+ }
+ /* Force NUL-termination of the user-supplied annotation string. */
+ ((IMG_CHAR *)
+ uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psRIWriteProcListEntryOUT->eError =
+ RIWriteProcListEntryKM(psConnection, OSGetDevNode(psConnection),
+ psRIWriteProcListEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteProcListEntryIN->ui64Size,
+ psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+ {
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRIWriteProcListEntryOUT->
+ hRIHandle,
+ (void *)psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _RIWriteProcListEntrypsRIHandleIntRelease);
+ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RIWriteProcListEntry_exit:
+
+ /* On failure, destroy the RI entry if handle allocation did not take ownership. */
+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psRIWriteProcListEntryOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the marshalling buffer was heap-allocated above. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge entry point for RIUpdateMEMDESCAddr.
+ * Looks up the RI entry from the client-supplied handle and updates its
+ * recorded device virtual address via RIUpdateMEMDESCAddrKM().  Always
+ * returns 0; the call status is in psRIUpdateMEMDESCAddrOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8,
+ IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN =
+ (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT =
+ (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8,
+ 0);
+
+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle (IMG_TRUE takes a reference). */
+ psRIUpdateMEMDESCAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psRIHandleInt,
+ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE);
+ if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIUpdateMEMDESCAddr_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRIUpdateMEMDESCAddrOUT->eError =
+ RIUpdateMEMDESCAddrKM(psRIHandleInt, psRIUpdateMEMDESCAddrIN->sAddr);
+
+RIUpdateMEMDESCAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psRIHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge entry point for RIDeleteMEMDESCEntry.
+ * Destroys the RI handle (the handle's release callback deletes the RI
+ * entry itself).  KERNEL_CCB_FULL and RETRY are treated as expected
+ * transient results and are not logged as errors.  Always returns 0;
+ * the call status is in psRIDeleteMEMDESCEntryOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8,
+ IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN =
+ (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT =
+ (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *)
+ IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psRIDeleteMEMDESCEntryOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ if (unlikely((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) &&
+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIDeleteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+RIDeleteMEMDESCEntry_exit:
+
+ return 0;
+}
+
+/* Bridge entry point for RIDumpList.
+ * Looks up the PMR from the client-supplied handle and dumps its RI list
+ * via RIDumpListKM(), then drops the lookup reference.  Always returns 0;
+ * the call status is in psRIDumpListOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIDumpListIN_UI8,
+ IMG_UINT8 * psRIDumpListOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN =
+ (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT =
+ (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0);
+
+ IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+ PMR *psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle (IMG_TRUE takes a reference). */
+ psRIDumpListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRHandleInt,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIDumpList_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt);
+
+RIDumpList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge entry point for RIDumpAll.
+ * Takes no meaningful input (the in-structure is a placeholder) and dumps
+ * the entire RI database via RIDumpAllKM().  Always returns 0; the call
+ * status is in psRIDumpAllOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIDumpAllIN_UI8,
+ IMG_UINT8 * psRIDumpAllOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN =
+ (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT =
+ (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+ psRIDumpAllOUT->eError = RIDumpAllKM();
+
+ return 0;
+}
+
+/* Bridge entry point for RIDumpProcess.
+ * Dumps the RI entries belonging to the given PID via RIDumpProcessKM().
+ * Always returns 0; the call status is in psRIDumpProcessOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIDumpProcessIN_UI8,
+ IMG_UINT8 * psRIDumpProcessOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN =
+ (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT =
+ (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psRIDumpProcessOUT->eError = RIDumpProcessKM(psRIDumpProcessIN->ui32Pid);
+
+ return 0;
+}
+
+/* Bridge entry point for RIWritePMREntryWithOwner.
+ * Like RIWritePMREntry, but records the supplied owner PID alongside the
+ * PMR.  Looks up the PMR handle, calls RIWritePMREntryWithOwnerKM(), then
+ * drops the lookup reference.  Always returns 0; the call status is in
+ * psRIWritePMREntryWithOwnerOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psRIWritePMREntryWithOwnerIN_UI8,
+ IMG_UINT8 * psRIWritePMREntryWithOwnerOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN =
+ (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *)
+ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT =
+ (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *)
+ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0);
+
+ IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle;
+ PMR *psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle (IMG_TRUE takes a reference). */
+ psRIWritePMREntryWithOwnerOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psPMRHandleInt,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
+ if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto RIWritePMREntryWithOwner_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psRIWritePMREntryWithOwnerOUT->eError =
+ RIWritePMREntryWithOwnerKM(psPMRHandleInt, psRIWritePMREntryWithOwnerIN->ui32Owner);
+
+RIWritePMREntryWithOwner_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+/* Forward declarations: these are called by the bridge framework, not via
+ * a header, hence the local prototypes.
+ */
+PVRSRV_ERROR InitRIBridge(void);
+void DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY,
+ PVRSRVBridgeRIWritePMREntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY,
+ PVRSRVBridgeRIWriteMEMDESCEntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY,
+ PVRSRVBridgeRIWriteProcListEntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR,
+ PVRSRVBridgeRIUpdateMEMDESCAddr, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY,
+ PVRSRVBridgeRIDeleteMEMDESCEntry, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+ NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+ NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS,
+ PVRSRVBridgeRIDumpProcess, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER,
+ PVRSRVBridgeRIWritePMREntryWithOwner, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all ri functions with services.
+ * Must mirror InitRIBridge(): every entry registered there is removed here.
+ */
+void DeinitRIBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16)
+
+/*******************************************
+ Connect
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+ IMG_UINT32 ui32ClientBuildOptions;
+ IMG_UINT32 ui32ClientDDKBuild;
+ IMG_UINT32 ui32ClientDDKVersion;
+ IMG_UINT32 ui32Flags;
+} __packed PVRSRV_BRIDGE_IN_CONNECT;
+
+/* NOTE(review): the IN/OUT packets below appear to form the user/kernel
+ * bridge ABI (__packed, fixed-width IMG_* types). Field names, order and
+ * types should not be changed without updating the matching user-mode side. */
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+ IMG_UINT64 ui64PackedBvnc;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CapabilityFlags;
+ IMG_UINT8 ui8KernelArch;
+} __packed PVRSRV_BRIDGE_OUT_CONNECT;
+
+/*******************************************
+ Disconnect
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+ /* Placeholder member so the otherwise-empty packet has at least one field. */
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+/*******************************************
+ AcquireGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+/*******************************************
+ ReleaseGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+} __packed PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+/*******************************************
+ EventObjectOpen
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hEventObject;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+/*******************************************
+ EventObjectWait
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+/*******************************************
+ EventObjectClose
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+/*******************************************
+ DumpDebugInfo
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+ IMG_UINT32 ui32VerbLevel;
+} __packed PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+/*******************************************
+ GetDevClockSpeed
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32ClockSpeed;
+} __packed PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+/*******************************************
+ HWOpTimeout
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+/*******************************************
+ AlignmentCheck
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+ /* User-space pointer to an array of ui32AlignChecksSize IMG_UINT32s. */
+ IMG_UINT32 *pui32AlignChecks;
+ IMG_UINT32 ui32AlignChecksSize;
+} __packed PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+/*******************************************
+ GetDeviceStatus
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+ PVRSRV_ERROR eError;
+ /* NOTE(review): "Satus" is a typo for "Status" but is kept as-is --
+ * presumably matched by user-mode code; renaming would break the ABI. */
+ IMG_UINT32 ui32DeviceSatus;
+} __packed PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+/*******************************************
+ GetMultiCoreInfo
+ *******************************************/
+
+/* Bridge in structure for GetMultiCoreInfo */
+typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG
+{
+ /* User-space pointer to receive ui32CapsSize IMG_UINT64 capability words. */
+ IMG_UINT64 *pui64Caps;
+ IMG_UINT32 ui32CapsSize;
+} __packed PVRSRV_BRIDGE_IN_GETMULTICOREINFO;
+
+/* Bridge out structure for GetMultiCoreInfo */
+typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG
+{
+ IMG_UINT64 *pui64Caps;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NumCores;
+} __packed PVRSRV_BRIDGE_OUT_GETMULTICOREINFO;
+
+/*******************************************
+ EventObjectWaitTimeout
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ IMG_UINT64 ui64uiTimeoutus;
+ IMG_HANDLE hOSEventKM;
+} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+/*******************************************
+ FindProcessMemStats
+ *******************************************/
+
+/* Bridge in structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
+{
+ IMG_UINT64 *pui64MemStatsArray;
+ IMG_UINT32 ui32ArrSize;
+ IMG_UINT32 ui32PID;
+ IMG_BOOL bbAllProcessStats;
+} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
+
+/* Bridge out structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
+{
+ IMG_UINT64 *pui64MemStatsArray;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
+
+/*******************************************
+ AcquireInfoPage
+ *******************************************/
+
+/* Bridge in structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE;
+
+/* Bridge out structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG
+{
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE;
+
+/*******************************************
+ ReleaseInfoPage
+ *******************************************/
+
+/* Bridge in structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG
+{
+ IMG_HANDLE hPMR;
+} __packed PVRSRV_BRIDGE_IN_RELEASEINFOPAGE;
+
+/* Bridge out structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE;
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+#include "info_page.h"
+#include "proc_stats.h"
+#include "rgx_fwif_alignchecks.h"
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge handler: unpack the Connect IN packet and forward it to
+ * PVRSRVConnectKM, writing the result fields into the OUT packet. */
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psConnectIN_UI8,
+ IMG_UINT8 * psConnectOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_CONNECT *psIn;
+ PVRSRV_BRIDGE_OUT_CONNECT *psOut;
+
+ psIn = (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0);
+ psOut = (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0);
+
+ psOut->eError =
+ PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection),
+ psIn->ui32Flags,
+ psIn->ui32ClientBuildOptions,
+ psIn->ui32ClientDDKVersion,
+ psIn->ui32ClientDDKBuild,
+ &psOut->ui8KernelArch,
+ &psOut->ui32CapabilityFlags, &psOut->ui64PackedBvnc);
+
+ return 0;
+}
+
+/* Bridge handler: the Disconnect IN packet is an empty placeholder; only
+ * the eError field of the OUT packet is populated. */
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDisconnectIN_UI8,
+ IMG_UINT8 * psDisconnectOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_OUT_DISCONNECT *psOut =
+ (PVRSRV_BRIDGE_OUT_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDisconnectIN_UI8);
+
+ psOut->eError = PVRSRVDisconnectKM();
+
+ return 0;
+}
+
+/* Handle-release callback: drops the global event object reference stored
+ * as the handle's private data when the handle is destroyed. */
+static PVRSRV_ERROR _AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void *pvData)
+{
+ return PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData);
+}
+
+/*
+ * Bridge handler: acquire a reference to the driver's global event object
+ * and wrap it in a per-connection handle returned to the caller. If handle
+ * creation fails after the acquisition, the exit path drops the reference
+ * again so it is not leaked.
+ */
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psAcquireGlobalEventObjectIN_UI8,
+ IMG_UINT8 * psAcquireGlobalEventObjectOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN =
+ (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *)
+ IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT =
+ (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *)
+ IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0);
+
+ IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+ PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+ psAcquireGlobalEventObjectOUT->eError =
+ PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+ {
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* The release callback drops the event-object reference when the
+ * allocated handle is eventually destroyed. */
+ psAcquireGlobalEventObjectOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+ (void *)hGlobalEventObjectInt,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _AcquireGlobalEventObjecthGlobalEventObjectIntRelease);
+ if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+AcquireGlobalEventObject_exit:
+
+ /* On any failure, undo the acquisition so the reference is not leaked. */
+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ if (hGlobalEventObjectInt)
+ {
+ PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge handler: destroy the caller's handle to the global event object.
+ * Handle destruction is staged; the release callback registered at
+ * acquisition time drops the underlying reference.
+ */
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psReleaseGlobalEventObjectIN_UI8,
+ IMG_UINT8 * psReleaseGlobalEventObjectOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN =
+ (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *)
+ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT =
+ (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *)
+ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psReleaseGlobalEventObjectOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psReleaseGlobalEventObjectIN->
+ hGlobalEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ /* KERNEL_CCB_FULL and RETRY are expected transient results and are
+ * deliberately not logged as errors here. */
+ if (unlikely
+ ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK)
+ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto ReleaseGlobalEventObject_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ReleaseGlobalEventObject_exit:
+
+ return 0;
+}
+
+/* Handle-release callback: closes the OS event stored as the handle's
+ * private data when the handle is destroyed. */
+static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData)
+{
+ return OSEventObjectClose((IMG_HANDLE) pvData);
+}
+
+/*
+ * Bridge handler: look up the shared event object referenced by the caller,
+ * open a per-connection OS event on it, and return a handle to that OS
+ * event. The exit path always releases the lookup reference; on failure it
+ * also closes the OS event if it was opened.
+ */
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psEventObjectOpenIN_UI8,
+ IMG_UINT8 * psEventObjectOpenOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN =
+ (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT =
+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0);
+
+ IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+ IMG_HANDLE hEventObjectInt = NULL;
+ IMG_HANDLE hOSEventInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psEventObjectOpenOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hEventObjectInt,
+ hEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, IMG_TRUE);
+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto EventObjectOpen_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* Open outside the handle lock; only the handle table needs the lock. */
+ psEventObjectOpenOUT->eError = OSEventObjectOpen(hEventObjectInt, &hOSEventInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+ {
+ goto EventObjectOpen_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psEventObjectOpenOUT->hOSEvent,
+ (void *)hOSEventInt,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _EventObjectOpenhOSEventIntRelease);
+ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto EventObjectOpen_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+EventObjectOpen_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hEventObjectInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hEventObject, PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* On failure, close the OS event we opened so it is not leaked. */
+ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ if (hOSEventInt)
+ {
+ OSEventObjectClose(hOSEventInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge handler: look up the caller's OS event handle and block in
+ * OSEventObjectWait. The lookup reference is released in the exit path
+ * after the wait returns.
+ */
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psEventObjectWaitIN_UI8,
+ IMG_UINT8 * psEventObjectWaitOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN =
+ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT =
+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0);
+
+ IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psEventObjectWaitOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
+ if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto EventObjectWait_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* The wait itself runs without the handle lock held. */
+ psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt);
+
+EventObjectWait_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/*
+ * Bridge handler: destroy the caller's OS event handle. The release
+ * callback registered at open time closes the underlying OS event.
+ */
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psEventObjectCloseIN_UI8,
+ IMG_UINT8 * psEventObjectCloseOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN =
+ (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT =
+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psHandleBase);
+
+ psEventObjectCloseOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ /* KERNEL_CCB_FULL and RETRY are expected transient results and are
+ * deliberately not logged as errors here. */
+ if (unlikely((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(psEventObjectCloseOUT->eError)));
+ UnlockHandle(psConnection->psHandleBase);
+ goto EventObjectClose_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+EventObjectClose_exit:
+
+ return 0;
+}
+
+/* Bridge handler: forward the requested verbosity level to
+ * PVRSRVDumpDebugInfoKM for this connection's device node. */
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psDumpDebugInfoIN_UI8,
+ IMG_UINT8 * psDumpDebugInfoOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psIn =
+ (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psOut =
+ (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0);
+
+ psOut->eError =
+ PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection),
+ psIn->ui32VerbLevel);
+
+ return 0;
+}
+
+/* Bridge handler: query the device clock speed. The IN packet carries no
+ * payload; the result lands in the OUT packet. */
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psGetDevClockSpeedIN_UI8,
+ IMG_UINT8 * psGetDevClockSpeedOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psIn =
+ (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psOut =
+ (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psIn);
+
+ psOut->eError =
+ PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection),
+ &psOut->ui32ClockSpeed);
+
+ return 0;
+}
+
+/* Bridge handler: notify the server of a hardware operation timeout. The
+ * IN packet is an empty placeholder. */
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psHWOpTimeoutIN_UI8,
+ IMG_UINT8 * psHWOpTimeoutOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psIn =
+ (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psOut =
+ (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psIn);
+
+ psOut->eError = PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection));
+
+ return 0;
+}
+
+static_assert(RGXFW_ALIGN_CHECKS_UM_MAX <= IMG_UINT32_MAX,
+ "RGXFW_ALIGN_CHECKS_UM_MAX must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler: copy the caller's array of alignment-check words into
+ * kernel space and pass it to PVRSRVAlignmentCheckKM. The copy buffer is
+ * taken from unused space at the end of the bridge IN buffer when it fits,
+ * otherwise heap-allocated (and freed in the exit path).
+ */
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psAlignmentCheckIN_UI8,
+ IMG_UINT8 * psAlignmentCheckOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN =
+ (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT =
+ (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0);
+
+ IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit arithmetic so an attacker-supplied size cannot overflow here. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0;
+
+ /* Reject user-supplied array sizes beyond the firmware-defined maximum. */
+ if (unlikely(psAlignmentCheckIN->ui32AlignChecksSize > RGXFW_ALIGN_CHECKS_UM_MAX))
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto AlignmentCheck_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto AlignmentCheck_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psAlignmentCheckIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto AlignmentCheck_exit;
+ }
+ }
+ }
+
+ /* Carve the alignment-check array out of the staging buffer. */
+ if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+ {
+ ui32AlignChecksInt =
+ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, ui32AlignChecksInt,
+ (const void __user *)psAlignmentCheckIN->pui32AlignChecks,
+ psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK)
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto AlignmentCheck_exit;
+ }
+ }
+
+ psAlignmentCheckOUT->eError =
+ PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection),
+ psAlignmentCheckIN->ui32AlignChecksSize, ui32AlignChecksInt);
+
+AlignmentCheck_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psAlignmentCheckOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the staging buffer was heap-allocated. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge handler: query the device status word. The IN packet carries no
+ * payload; the result lands in the OUT packet. */
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psGetDeviceStatusIN_UI8,
+ IMG_UINT8 * psGetDeviceStatusOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psIn =
+ (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psOut =
+ (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0);
+
+ PVR_UNREFERENCED_PARAMETER(psIn);
+
+ /* "Satus" spelling comes from the bridge ABI struct and is kept as-is. */
+ psOut->eError =
+ PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection),
+ &psOut->ui32DeviceSatus);
+
+ return 0;
+}
+
+static_assert(8 <= IMG_UINT32_MAX, "8 must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler: query multi-core information and copy up to 8 capability
+ * words back to the caller's buffer. The staging buffer is taken from
+ * unused space at the end of the bridge IN buffer when it fits, otherwise
+ * heap-allocated (and freed in the exit path).
+ */
+static IMG_INT
+PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psGetMultiCoreInfoIN_UI8,
+ IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN =
+ (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT =
+ (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0);
+
+ IMG_UINT64 *pui64CapsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit arithmetic so an attacker-supplied size cannot overflow here. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0;
+
+ /* Reject user-supplied array sizes beyond the fixed maximum of 8. */
+ if (psGetMultiCoreInfoIN->ui32CapsSize > 8)
+ {
+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto GetMultiCoreInfo_exit;
+ }
+
+ /* Echo the user's destination pointer so the copy-out below can use it. */
+ psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto GetMultiCoreInfo_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetMultiCoreInfoIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto GetMultiCoreInfo_exit;
+ }
+ }
+ }
+
+ /* Carve the caps array out of the staging buffer. */
+ if (psGetMultiCoreInfoIN->ui32CapsSize != 0)
+ {
+ pui64CapsInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64);
+ }
+
+ psGetMultiCoreInfoOUT->eError =
+ PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection),
+ psGetMultiCoreInfoIN->ui32CapsSize,
+ &psGetMultiCoreInfoOUT->ui32NumCores, pui64CapsInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psGetMultiCoreInfoOUT->eError != PVRSRV_OK))
+ {
+ goto GetMultiCoreInfo_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((pui64CapsInt) && ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, pui64CapsInt,
+ (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64))) != PVRSRV_OK))
+ {
+ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto GetMultiCoreInfo_exit;
+ }
+ }
+
+GetMultiCoreInfo_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psGetMultiCoreInfoOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the staging buffer was heap-allocated. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge handler: look up the caller's OS event handle and block in
+ * OSEventObjectWaitTimeout with the caller-supplied timeout (microseconds,
+ * per the ui64uiTimeoutus field name -- confirm against OS layer). The
+ * lookup reference is released in the exit path after the wait returns.
+ */
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8,
+ IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN =
+ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *)
+ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT =
+ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *)
+ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0);
+
+ IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psEventObjectWaitTimeoutOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE);
+ if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto EventObjectWaitTimeout_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* The wait itself runs without the handle lock held. */
+ psEventObjectWaitTimeoutOUT->eError =
+ OSEventObjectWaitTimeout(hOSEventKMInt, psEventObjectWaitTimeoutIN->ui64uiTimeoutus);
+
+EventObjectWaitTimeout_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Compile-time guard: ui32ArrSize is range-checked against this count below,
+ * and the count itself must be representable in the 32-bit bridge field. */
+static_assert(PVRSRV_PROCESS_STAT_TYPE_COUNT <= IMG_UINT32_MAX,
+ "PVRSRV_PROCESS_STAT_TYPE_COUNT must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge handler for FindProcessMemStats.
+ *
+ * Validates the caller-requested stats array size, stages a kernel-side
+ * IMG_UINT64 array (reusing the tail of the bridge input buffer when it
+ * fits, otherwise heap-allocating), calls PVRSRVFindProcessMemStatsKM()
+ * and copies the stats back to the user-supplied pointer.
+ *
+ * Always returns 0; the PVRSRV status is carried in
+ * psFindProcessMemStatsOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psFindProcessMemStatsIN_UI8,
+ IMG_UINT8 * psFindProcessMemStatsOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN =
+ (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8,
+ 0);
+ PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT =
+ (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8,
+ 0);
+
+ IMG_UINT64 *pui64MemStatsArrayInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Size computed in 64 bits so a huge ui32ArrSize cannot silently wrap. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) + 0;
+
+ /* Reject requests larger than the fixed number of process stat types. */
+ if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT)
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto FindProcessMemStats_exit;
+ }
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* Echo the user-space destination pointer back so copy-out below uses it. */
+ psFindProcessMemStatsOUT->pui64MemStatsArray = psFindProcessMemStatsIN->pui64MemStatsArray;
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto FindProcessMemStats_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psFindProcessMemStatsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer too small: fall back to a heap allocation,
+ * freed at FindProcessMemStats_exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto FindProcessMemStats_exit;
+ }
+ }
+ }
+
+ /* Carve the stats array out of the staging buffer. */
+ if (psFindProcessMemStatsIN->ui32ArrSize != 0)
+ {
+ pui64MemStatsArrayInt =
+ (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64);
+ }
+
+ psFindProcessMemStatsOUT->eError =
+ PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID,
+ psFindProcessMemStatsIN->ui32ArrSize,
+ psFindProcessMemStatsIN->bbAllProcessStats,
+ pui64MemStatsArrayInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK))
+ {
+ goto FindProcessMemStats_exit;
+ }
+
+ /* If dest ptr is non-null and we have data to copy */
+ if ((pui64MemStatsArrayInt) &&
+ ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64)) > 0))
+ {
+ if (unlikely
+ (OSCopyToUser
+ (NULL, (void __user *)psFindProcessMemStatsOUT->pui64MemStatsArray,
+ pui64MemStatsArrayInt,
+ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT64))) != PVRSRV_OK))
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto FindProcessMemStats_exit;
+ }
+ }
+
+FindProcessMemStats_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psFindProcessMemStatsOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free when the buffer was heap-allocated (not borrowed from input). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Handle-release callback: drops the reference on the info page PMR that
+ * PVRSRVBridgeAcquireInfoPage stored in the connection's handle table. */
+static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData)
+{
+ return PVRSRVReleaseInfoPageKM((PMR *) pvData);
+}
+
+/*
+ * Bridge handler for AcquireInfoPage.
+ *
+ * Acquires the services info page PMR and publishes it to the calling
+ * process as a DEVMEM_MEM_IMPORT handle in the per-process handle base.
+ * The handle's release callback (_AcquireInfoPagepsPMRIntRelease) drops
+ * the PMR reference when the handle is destroyed. On any failure after
+ * acquisition, the reference is released here before returning.
+ *
+ * Always returns 0; status is carried in psAcquireInfoPageOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psAcquireInfoPageIN_UI8,
+ IMG_UINT8 * psAcquireInfoPageOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN =
+ (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT =
+ (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0);
+
+ PMR *psPMRInt = NULL;
+
+ PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN);
+
+ psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+ {
+ goto AcquireInfoPage_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psAcquireInfoPageOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &psAcquireInfoPageOUT->hPMR, (void *)psPMRInt,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) & _AcquireInfoPagepsPMRIntRelease);
+ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto AcquireInfoPage_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+AcquireInfoPage_exit:
+
+ /* Error path: undo the acquisition since no handle now owns the PMR. */
+ if (psAcquireInfoPageOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PVRSRVReleaseInfoPageKM(psPMRInt);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Bridge handler for ReleaseInfoPage.
+ *
+ * Destroys the caller's info page handle; the handle's release callback
+ * drops the underlying PMR reference. KERNEL_CCB_FULL and RETRY results
+ * are treated as non-fatal (caller may retry) and are not logged.
+ *
+ * Always returns 0; status is carried in psReleaseInfoPageOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psReleaseInfoPageIN_UI8,
+ IMG_UINT8 * psReleaseInfoPageOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN =
+ (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT =
+ (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psReleaseInfoPageOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (IMG_HANDLE) psReleaseInfoPageIN->hPMR,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+ if (unlikely((psReleaseInfoPageOUT->eError != PVRSRV_OK) &&
+ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(psReleaseInfoPageOUT->eError)));
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto ReleaseInfoPage_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ReleaseInfoPage_exit:
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+void DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services.
+ * Installs one dispatch-table entry per SRVCORE bridge command, mapping
+ * the command ID to its PVRSRVBridge* handler. Mirrored by
+ * DeinitSRVCOREBridge() at teardown.
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT,
+ PVRSRVBridgeConnect, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT,
+ PVRSRVBridgeDisconnect, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT,
+ PVRSRVBridgeAcquireGlobalEventObject, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT,
+ PVRSRVBridgeReleaseGlobalEventObject, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN,
+ PVRSRVBridgeEventObjectOpen, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT,
+ PVRSRVBridgeEventObjectWait, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE,
+ PVRSRVBridgeEventObjectClose, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO,
+ PVRSRVBridgeDumpDebugInfo, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED,
+ PVRSRVBridgeGetDevClockSpeed, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT,
+ PVRSRVBridgeHWOpTimeout, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK,
+ PVRSRVBridgeAlignmentCheck, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS,
+ PVRSRVBridgeGetDeviceStatus, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO,
+ PVRSRVBridgeGetMultiCoreInfo, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT,
+ PVRSRVBridgeEventObjectWaitTimeout, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS,
+ PVRSRVBridgeFindProcessMemStats, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE,
+ PVRSRVBridgeAcquireInfoPage, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE,
+ PVRSRVBridgeReleaseInfoPage, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all srvcore functions with services.
+ * Removes every dispatch-table entry installed by InitSRVCOREBridge().
+ */
+void DeinitSRVCOREBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+ PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+ PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE,
+ PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
+/* Allocate a sync primitive block; returns its handle, FW virtual address,
+ * block size and backing PMR handle via the out parameters. */
+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE * phSyncHandle,
+ IMG_UINT32 * pui32SyncPrimVAddr,
+ IMG_UINT32 * pui32SyncPrimBlockSize,
+ IMG_HANDLE * phhSyncPMR);
+
+/* Free a sync primitive block returned by BridgeAllocSyncPrimitiveBlock. */
+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle);
+
+/* Set the value of the sync primitive at ui32Index within the block. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value);
+
+/* PDump the sync primitive at ui32Offset (no-op error if PDUMP disabled). */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset);
+
+/* PDump a load of ui32Value to the sync primitive at ui32Offset. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/* PDump a poll on the sync primitive (value/mask/operator) at ui32Offset. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/* PDump a circular-buffer-pointer check against the sync primitive. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/* Report allocation of a sync (identified by FW address) for tracking. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR * puiClassName);
+
+/* Report release of a sync previously announced via BridgeSyncAllocEvent. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr);
+
+/* PDump a poll for all checkpoints on hFence being signalled. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge,
+ PVRSRV_FENCE hFence);
+#endif /* CLIENT_SYNC_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for sync
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint.h"
+
+/*
+ * Direct bridge entry for AllocSyncPrimitiveBlock.
+ * hBridge carries the PVRSRV_DEVICE_NODE pointer in the direct-call path.
+ * NOTE(review): the out handles are written even when the KM call fails,
+ * in which case they are NULL — callers must check eError first.
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE * phSyncHandle,
+ IMG_UINT32 * pui32SyncPrimVAddr,
+ IMG_UINT32 * pui32SyncPrimBlockSize,
+ IMG_HANDLE * phhSyncPMR)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+ PMR *pshSyncPMRInt = NULL;
+
+ eError =
+ PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+ &psSyncHandleInt,
+ pui32SyncPrimVAddr,
+ pui32SyncPrimBlockSize, &pshSyncPMRInt);
+
+ *phSyncHandle = psSyncHandleInt;
+ *phhSyncPMR = pshSyncPMRInt;
+ return eError;
+}
+
+/* Direct bridge entry: frees a sync primitive block previously obtained
+ * from BridgeAllocSyncPrimitiveBlock. hBridge is unused in this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ return PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) hSyncHandle);
+}
+
+/* Direct bridge entry: sets the sync primitive at ui32Index in the block
+ * to ui32Value. hBridge is unused in this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value)
+{
+ SYNC_PRIMITIVE_BLOCK *psBlock = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ return PVRSRVSyncPrimSetKM(psBlock, ui32Index, ui32Value);
+}
+
+/* Direct bridge entry: PDumps the sync primitive at ui32Offset. Returns
+ * PVRSRV_ERROR_NOT_IMPLEMENTED when the driver is built without PDUMP. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset)
+{
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+#if defined(PDUMP)
+ return PVRSRVSyncPrimPDumpKM((SYNC_PRIMITIVE_BLOCK *) hSyncHandle, ui32Offset);
+#else
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Direct bridge entry: PDumps a write of ui32Value to the sync primitive
+ * at ui32Offset. Compiled out when PDUMP support is absent. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+#if defined(PDUMP)
+ return PVRSRVSyncPrimPDumpValueKM((SYNC_PRIMITIVE_BLOCK *) hSyncHandle,
+ ui32Offset, ui32Value);
+#else
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Direct bridge entry: PDumps a poll (value/mask/operator) on the sync
+ * primitive at ui32Offset. Returns PVRSRV_ERROR_NOT_IMPLEMENTED when the
+ * driver is built without PDUMP. hBridge is unused in this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+ ui32Offset, ui32Value, ui32Mask, eOperator, uiPDumpFlags);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Direct bridge entry: PDumps a circular-buffer-pointer (CBP) check on the
+ * sync primitive at ui32Offset. Returns PVRSRV_ERROR_NOT_IMPLEMENTED when
+ * the driver is built without PDUMP. hBridge is unused in this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+ ui32Offset, uiWriteOffset, uiPacketSize, uiBufferSize);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+/* Direct bridge entry: records allocation of a sync (identified by its FW
+ * address) with an annotation class name. hBridge carries the device node
+ * pointer in the direct-call path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR * puiClassName)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+ return PVRSRVSyncAllocEventKM(NULL, psDevNode, bServerSync, ui32FWAddr,
+ ui32ClassNameSize, puiClassName);
+}
+
+/* Direct bridge entry: records release of a sync previously announced via
+ * BridgeSyncAllocEvent. hBridge carries the device node pointer. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) ((void *)hBridge);
+
+ return PVRSRVSyncFreeEventKM(NULL, psDevNode, ui32FWAddr);
+}
+
+/* Direct bridge entry: PDumps a poll for all checkpoints on hFence being
+ * signalled. Returns PVRSRV_ERROR_NOT_IMPLEMENTED when the driver is built
+ * without PDUMP. hBridge is unused in this path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge,
+ PVRSRV_FENCE hFence)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hFence);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+#include <powervr/pvrsrv_sync_ext.h>
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9)
+
+/* All bridge IN/OUT structures below are __packed: their exact byte layout
+ * is the user/kernel bridge ABI for the sync module. Do not reorder, add
+ * or resize fields without regenerating both sides of the bridge. */
+
+/*******************************************
+ AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __packed PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_HANDLE hhSyncPMR;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32SyncPrimBlockSize;
+ IMG_UINT32 ui32SyncPrimVAddr;
+} __packed PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+/*******************************************
+ FreeSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+} __packed PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+/*******************************************
+ SyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+/*******************************************
+ SyncPrimPDump
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+/*******************************************
+ SyncPrimPDumpValue
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+/*******************************************
+ SyncPrimPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32Mask;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ PDUMP_FLAGS_T uiPDumpFlags;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+/*******************************************
+ SyncPrimPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+/*******************************************
+ SyncAllocEvent
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+ const IMG_CHAR *puiClassName;
+ IMG_UINT32 ui32ClassNameSize;
+ IMG_UINT32 ui32FWAddr;
+ IMG_BOOL bServerSync;
+} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+/*******************************************
+ SyncFreeEvent
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+ IMG_UINT32 ui32FWAddr;
+} __packed PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+/*******************************************
+ SyncCheckpointSignalledPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncCheckpointSignalledPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG
+{
+ PVRSRV_FENCE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL;
+
+/* Bridge out structure for SyncCheckpointSignalledPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL;
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint.h"
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback for sync-primitive-block handles: frees the
+ * underlying SYNC_PRIMITIVE_BLOCK when its handle is destroyed. */
+static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData)
+{
+ return PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData);
+}
+
+/* Bridge handler for AllocSyncPrimitiveBlock.
+ *
+ * Allocates a sync primitive block for this connection and wraps both the
+ * block and its backing PMR in connection handles returned to the client.
+ * On any failure after allocation, the partially-created handle (and
+ * through its release callback, the block itself) is torn down before
+ * returning.  Always returns 0; the per-call status is carried in
+ * psAllocSyncPrimitiveBlockOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8,
+ IMG_UINT8 * psAllocSyncPrimitiveBlockOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN =
+ (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *)
+ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT =
+ (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *)
+ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0);
+
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+ PMR *pshSyncPMRInt = NULL;
+
+ /* This call takes no input payload. */
+ PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+ psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+ psAllocSyncPrimitiveBlockOUT->eError =
+ PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevNode(psConnection),
+ &psSyncHandleInt,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+ &pshSyncPMRInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+ {
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* The release callback ties the block's lifetime to this handle. */
+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psAllocSyncPrimitiveBlockOUT->
+ hSyncHandle,
+ (void *)psSyncHandleInt,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) &
+ _AllocSyncPrimitiveBlockpsSyncHandleIntRelease);
+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* PMR handle is a sub-handle of hSyncHandle, so it appears to be cleaned
+  * up with its parent — confirm against the handle manager. */
+ psAllocSyncPrimitiveBlockOUT->eError =
+ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+ &psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
+ (void *)pshSyncPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+AllocSyncPrimitiveBlock_exit:
+
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Lock over handle creation cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE)
+ psAllocSyncPrimitiveBlockOUT->
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSyncHandleInt = NULL;
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ }
+
+ /* Only reached when the block was allocated but no handle was created
+  * for it: free it directly. */
+ if (psSyncHandleInt)
+ {
+ PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+ }
+ }
+
+ return 0;
+}
+
+/* Bridge handler for FreeSyncPrimitiveBlock: destroys the connection
+ * handle that owns the sync primitive block (the handle's release
+ * callback frees the block itself).  Always returns 0; the call status
+ * is reported through psFreeSyncPrimitiveBlockOUT->eError. */
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8,
+ IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psIn =
+ (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *)
+ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psOut =
+ (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *)
+ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0);
+ PVRSRV_ERROR eError;
+
+ /* Handle destruction must happen under the connection handle lock. */
+ LockHandle(psConnection->psHandleBase);
+
+ eError = PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psIn->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ psOut->eError = eError;
+
+ /* RETRY and KERNEL_CCB_FULL are expected transient results here; only
+  * log genuine failures. */
+ if (unlikely((eError != PVRSRV_OK) &&
+ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && (eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+ }
+
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+/* Bridge handler for SyncPrimSet: looks up the sync primitive block from
+ * the client handle and sets the primitive at ui32Index to ui32Value.
+ * The lookup (IMG_TRUE flag) appears to take a reference which is dropped
+ * again in the exit path.  Always returns 0; status travels in
+ * psSyncPrimSetOUT->eError. */
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncPrimSetIN_UI8,
+ IMG_UINT8 * psSyncPrimSetOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN =
+ (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0);
+
+ IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncPrimSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto SyncPrimSet_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ /* The KM call runs without the handle lock held. */
+ psSyncPrimSetOUT->eError =
+ PVRSRVSyncPrimSetKM(psSyncHandleInt,
+ psSyncPrimSetIN->ui32Index, psSyncPrimSetIN->ui32Value);
+
+SyncPrimSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#if defined(PDUMP)
+
+/* Bridge handler for SyncPrimPDump (PDUMP builds only): looks up the
+ * sync primitive block and PDumps the primitive at ui32Offset.  The
+ * lookup reference is dropped in the exit path.  Always returns 0;
+ * status travels in psSyncPrimPDumpOUT->eError. */
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncPrimPDumpIN_UI8,
+ IMG_UINT8 * psSyncPrimPDumpOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN =
+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0);
+
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto SyncPrimPDump_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVSyncPrimPDumpKM(psSyncHandleInt, psSyncPrimPDumpIN->ui32Offset);
+
+SyncPrimPDump_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+
+/* Bridge handler for SyncPrimPDumpValue (PDUMP builds only): looks up
+ * the sync primitive block and PDumps ui32Value at ui32Offset.  The
+ * lookup reference is dropped in the exit path.  Always returns 0;
+ * status travels in psSyncPrimPDumpValueOUT->eError. */
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncPrimPDumpValueIN_UI8,
+ IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN =
+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8,
+ 0);
+
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto SyncPrimPDumpValue_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt,
+ psSyncPrimPDumpValueIN->ui32Offset,
+ psSyncPrimPDumpValueIN->ui32Value);
+
+SyncPrimPDumpValue_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+
+/* Bridge handler for SyncPrimPDumpPol (PDUMP builds only): looks up the
+ * sync primitive block and emits a PDump poll on the primitive at
+ * ui32Offset for (value & mask) under the given operator and flags.
+ * The lookup reference is dropped in the exit path.  Always returns 0;
+ * status travels in psSyncPrimPDumpPolOUT->eError. */
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncPrimPDumpPolIN_UI8,
+ IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN =
+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0);
+
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto SyncPrimPDumpPol_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt,
+ psSyncPrimPDumpPolIN->ui32Offset,
+ psSyncPrimPDumpPolIN->ui32Value,
+ psSyncPrimPDumpPolIN->ui32Mask,
+ psSyncPrimPDumpPolIN->eOperator,
+ psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+SyncPrimPDumpPol_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+
+/* Bridge handler for SyncPrimPDumpCBP (PDUMP builds only): looks up the
+ * sync primitive block and emits a PDump circular-buffer poll with the
+ * supplied write offset, packet size and buffer size.  The lookup
+ * reference is dropped in the exit path.  Always returns 0; status
+ * travels in psSyncPrimPDumpCBPOUT->eError. */
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8,
+ IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN =
+ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0);
+
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **)&psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+ if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psHandleBase);
+ goto SyncPrimPDumpCBP_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt,
+ psSyncPrimPDumpCBPIN->ui32Offset,
+ psSyncPrimPDumpCBPIN->uiWriteOffset,
+ psSyncPrimPDumpCBPIN->uiPacketSize,
+ psSyncPrimPDumpCBPIN->uiBufferSize);
+
+SyncPrimPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psHandleBase);
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+	      "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/* Bridge handler for SyncAllocEvent.
+ *
+ * Marshals the variable-length class-name string from user space (bounded
+ * by PVRSRV_SYNC_NAME_LENGTH) and forwards the event to
+ * PVRSRVSyncAllocEventKM.  Always returns 0; the call status travels in
+ * psSyncAllocEventOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncAllocEventIN_UI8,
+ IMG_UINT8 * psSyncAllocEventOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN =
+ (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0);
+
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ /* Size the marshalling buffer in 64 bits first to avoid overflow. */
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Reject over-long class names before any buffer is sized. */
+ if (unlikely(psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH))
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncAllocEvent_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto SyncAllocEvent_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncAllocEventIN;
+
+ /* Reuse the tail of the bridge input buffer: no allocation needed. */
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncAllocEvent_exit;
+ }
+ }
+ }
+
+ /* Carve the class-name array out of the marshalling buffer. */
+ if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiClassNameInt, (const void __user *)psSyncAllocEventIN->puiClassName,
+ psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncAllocEvent_exit;
+ }
+ /* Force NUL-termination regardless of what user space supplied. */
+ ((IMG_CHAR *)
+ uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ psSyncAllocEventOUT->eError =
+ PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection),
+ psSyncAllocEventIN->bServerSync,
+ psSyncAllocEventIN->ui32FWAddr,
+ psSyncAllocEventIN->ui32ClassNameSize, uiClassNameInt);
+
+SyncAllocEvent_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psSyncAllocEventOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only free if we actually allocated (not reusing the input buffer). */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Bridge handler for SyncFreeEvent: forwards the firmware address of the
+ * sync whose event is being freed straight to PVRSRVSyncFreeEventKM.
+ * Always returns 0; status travels in the out structure's eError. */
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFreeEventIN_UI8,
+ IMG_UINT8 * psSyncFreeEventOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psIn =
+ (PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psOut =
+ (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0);
+
+ psOut->eError = PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection),
+ psIn->ui32FWAddr);
+
+ return 0;
+}
+
+#if defined(PDUMP)
+
+/* Bridge handler for SyncCheckpointSignalledPDumpPol (PDUMP builds only):
+ * passes the client fence straight to the KM implementation.  Always
+ * returns 0; status travels in the out structure's eError. */
+static IMG_INT
+PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolIN_UI8,
+ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psIn =
+ (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *)
+ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psOut =
+ (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *)
+ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0);
+
+ /* The connection is not needed for this call. */
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psOut->eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(psIn->hFence);
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL
+#endif
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCBridge(void);
+void DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services.
+ * On non-PDUMP builds the PDump handlers are registered as NULL via the
+ * #define stubs above.  Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK,
+ PVRSRVBridgeAllocSyncPrimitiveBlock, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK,
+ PVRSRVBridgeFreeSyncPrimitiveBlock, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET,
+ PVRSRVBridgeSyncPrimSet, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP,
+ PVRSRVBridgeSyncPrimPDump, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE,
+ PVRSRVBridgeSyncPrimPDumpValue, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL,
+ PVRSRVBridgeSyncPrimPDumpPol, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP,
+ PVRSRVBridgeSyncPrimPDumpCBP, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT,
+ PVRSRVBridgeSyncAllocEvent, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT,
+ PVRSRVBridgeSyncFreeEvent, NULL);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL,
+ PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all sync functions with services.
+ * Removes every dispatch-table entry installed by InitSYNCBridge.
+ */
+void DeinitSYNCBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC,
+ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for syncfallback
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for syncfallback
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCFALLBACK_BRIDGE_H
+#define COMMON_SYNCFALLBACK_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_sync_km.h"
+
+/* Bridge call indices for the SYNCFALLBACK group, expressed as contiguous
+ * offsets from CMD_FIRST; CMD_LAST must track the final entry. */
+#define PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_CMD_FIRST+15)
+
+/*******************************************
+ SyncFbTimelineCreatePVR
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineCreatePVR.
+ * All bridge structures are __packed so client and server agree on layout. */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR_TAG
+{
+ /* User-mode pointer to the timeline name; length in ui32TimelineNameSize. */
+ const IMG_CHAR *puiTimelineName;
+ IMG_UINT32 ui32TimelineNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR;
+
+/* Bridge out structure for SyncFbTimelineCreatePVR */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR_TAG
+{
+ /* Handle to the newly created timeline. */
+ IMG_HANDLE hTimeline;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR;
+
+/*******************************************
+ SyncFbTimelineRelease
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineRelease */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE_TAG
+{
+ /* Timeline handle to release. */
+ IMG_HANDLE hTimeline;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE;
+
+/* Bridge out structure for SyncFbTimelineRelease */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE;
+
+/*******************************************
+ SyncFbFenceDup
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceDup */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP_TAG
+{
+ /* Fence handle to duplicate. */
+ IMG_HANDLE hInFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP;
+
+/* Bridge out structure for SyncFbFenceDup */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP_TAG
+{
+ /* Handle to the duplicated fence. */
+ IMG_HANDLE hOutFence;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP;
+
+/*******************************************
+ SyncFbFenceMerge
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceMerge */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE_TAG
+{
+ /* The two fences to merge. */
+ IMG_HANDLE hInFence1;
+ IMG_HANDLE hInFence2;
+ /* User-mode pointer to the merged fence's name; length in ui32FenceNameSize. */
+ const IMG_CHAR *puiFenceName;
+ IMG_UINT32 ui32FenceNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE;
+
+/* Bridge out structure for SyncFbFenceMerge */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE_TAG
+{
+ /* Handle to the merged fence. */
+ IMG_HANDLE hOutFence;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE;
+
+/*******************************************
+ SyncFbFenceRelease
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceRelease */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE_TAG
+{
+ /* Fence handle to release. */
+ IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE;
+
+/* Bridge out structure for SyncFbFenceRelease */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE;
+
+/*******************************************
+ SyncFbFenceWait
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceWait */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT_TAG
+{
+ IMG_HANDLE hFence;
+ /* Wait timeout; units are not visible here — presumably milliseconds,
+  * confirm against the server implementation. */
+ IMG_UINT32 ui32Timeout;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT;
+
+/* Bridge out structure for SyncFbFenceWait */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT;
+
+/*******************************************
+ SyncFbFenceDump
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceDump.
+ * Carries caller location info (file/module/line) alongside a description;
+ * each string pointer is paired with its length field below. */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP_TAG
+{
+ IMG_HANDLE hFence;
+ const IMG_CHAR *puiDescStr;
+ const IMG_CHAR *puiFileStr;
+ const IMG_CHAR *puiModuleStr;
+ IMG_UINT32 ui32DescStrLength;
+ IMG_UINT32 ui32FileStrLength;
+ IMG_UINT32 ui32Line;
+ IMG_UINT32 ui32ModuleStrLength;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP;
+
+/* Bridge out structure for SyncFbFenceDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP;
+
+/*******************************************
+ SyncFbTimelineCreateSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineCreateSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW_TAG
+{
+ /* User-mode pointer to the timeline name; length in ui32TimelineNameSize. */
+ const IMG_CHAR *puiTimelineName;
+ IMG_UINT32 ui32TimelineNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW;
+
+/* Bridge out structure for SyncFbTimelineCreateSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW_TAG
+{
+ IMG_HANDLE hTimeline;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW;
+
+/*******************************************
+ SyncFbFenceCreateSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceCreateSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW_TAG
+{
+ /* Software timeline on which to create the fence. */
+ IMG_HANDLE hTimeline;
+ /* User-mode pointer to the fence name; length in ui32FenceNameSize. */
+ const IMG_CHAR *puiFenceName;
+ IMG_UINT32 ui32FenceNameSize;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW;
+
+/* Bridge out structure for SyncFbFenceCreateSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW_TAG
+{
+ /* Index of the new sync point on the timeline (name suggests; confirm). */
+ IMG_UINT64 ui64SyncPtIdx;
+ IMG_HANDLE hOutFence;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW;
+
+/*******************************************
+ SyncFbTimelineAdvanceSW
+ *******************************************/
+
+/* Bridge in structure for SyncFbTimelineAdvanceSW */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW_TAG
+{
+ /* Software timeline to advance. */
+ IMG_HANDLE hTimeline;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW;
+
+/* Bridge out structure for SyncFbTimelineAdvanceSW */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW_TAG
+{
+ /* Index the timeline reached after advancing (name suggests; confirm). */
+ IMG_UINT64 ui64SyncPtIdx;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW;
+
+/*******************************************
+ SyncFbFenceExportInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE_TAG
+{
+ IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE;
+
+/* Bridge out structure for SyncFbFenceExportInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE_TAG
+{
+ IMG_HANDLE hExport;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE;
+
+/*******************************************
+ SyncFbFenceExportDestroyInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportDestroyInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE_TAG
+{
+ IMG_HANDLE hExport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE;
+
+/* Bridge out structure for SyncFbFenceExportDestroyInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE;
+
+/*******************************************
+ SyncFbFenceImportInsecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceImportInsecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE_TAG
+{
+ IMG_HANDLE hImport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE;
+
+/* Bridge out structure for SyncFbFenceImportInsecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE;
+
+/*******************************************
+ SyncFbFenceExportSecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportSecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE_TAG
+{
+ IMG_HANDLE hFence;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE;
+
+/* Bridge out structure for SyncFbFenceExportSecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE_TAG
+{
+ IMG_SECURE_TYPE Export;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE;
+
+/*******************************************
+ SyncFbFenceExportDestroySecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceExportDestroySecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE_TAG
+{
+ IMG_HANDLE hExport;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE;
+
+/* Bridge out structure for SyncFbFenceExportDestroySecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE;
+
+/*******************************************
+ SyncFbFenceImportSecure
+ *******************************************/
+
+/* Bridge in structure for SyncFbFenceImportSecure */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE_TAG
+{
+ IMG_SECURE_TYPE Import;
+} __packed PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE;
+
+/* Bridge out structure for SyncFbFenceImportSecure */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ PVRSRV_ERROR eError;
+} __packed PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE;
+
+#endif /* COMMON_SYNCFALLBACK_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for syncfallback
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for syncfallback
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync_fallback_server.h"
+#include "pvrsrv_sync_server.h"
+
+#include "common_syncfallback_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+static PVRSRV_ERROR ReleaseExport(void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ return PVRSRV_OK;
+}
+#endif
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Handle-release callback: drops the server timeline reference when the
+ * handle allocated by PVRSRVBridgeSyncFbTimelineCreatePVR is destroyed. */
+static PVRSRV_ERROR _SyncFbTimelineCreatePVRpsTimelineIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = SyncFbTimelineRelease((PVRSRV_TIMELINE_SERVER *) pvData);
+ return eError;
+}
+
+static_assert(SYNC_FB_TIMELINE_MAX_LENGTH <= IMG_UINT32_MAX,
+ "SYNC_FB_TIMELINE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge entry point for SyncFbTimelineCreatePVR.
+ *
+ * Copies the (bounded) timeline name from user space, creates a server
+ * timeline object and wraps it in a per-process handle returned in the OUT
+ * structure. If any step after creation fails, the timeline reference is
+ * dropped before returning. The IMG_INT return is always 0; the real
+ * status travels in psSyncFbTimelineCreatePVROUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineCreatePVR(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbTimelineCreatePVRIN_UI8,
+ IMG_UINT8 * psSyncFbTimelineCreatePVROUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR *psSyncFbTimelineCreatePVRIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATEPVR *)
+ IMG_OFFSET_ADDR(psSyncFbTimelineCreatePVRIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR *psSyncFbTimelineCreatePVROUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATEPVR *)
+ IMG_OFFSET_ADDR(psSyncFbTimelineCreatePVROUT_UI8, 0);
+
+ IMG_CHAR *uiTimelineNameInt = NULL;
+ PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* Accumulate the staging-buffer size in 64 bits so a huge user-supplied
+  * name size cannot overflow a 32-bit total. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Reject names longer than the server-side maximum before any copying. */
+ if (unlikely
+ (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize > SYNC_FB_TIMELINE_MAX_LENGTH))
+ {
+ psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psSyncFbTimelineCreatePVRIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbTimelineCreatePVRIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer tail too small: fall back to a heap allocation,
+  * freed at the end of this function (bHaveEnoughSpace == FALSE). */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+ }
+ }
+
+ /* Carve the name array out of the staging buffer. */
+ if (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize != 0)
+ {
+ uiTimelineNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset +=
+ psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiTimelineNameInt,
+ (const void __user *)psSyncFbTimelineCreatePVRIN->puiTimelineName,
+ psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) !=
+ PVRSRV_OK)
+ {
+ psSyncFbTimelineCreatePVROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+ /* Defensively force NUL termination of the user-supplied name. */
+ ((IMG_CHAR *)
+ uiTimelineNameInt)[(psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize *
+ sizeof(IMG_CHAR)) - 1] = '\0';
+ }
+
+ psSyncFbTimelineCreatePVROUT->eError =
+ SyncFbTimelineCreatePVR(psSyncFbTimelineCreatePVRIN->ui32TimelineNameSize,
+ uiTimelineNameInt, &psTimelineInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK))
+ {
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbTimelineCreatePVROUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &psSyncFbTimelineCreatePVROUT->hTimeline,
+ (void *)psTimelineInt,
+ PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ (PFN_HANDLE_RELEASE) &
+ _SyncFbTimelineCreatePVRpsTimelineIntRelease);
+ if (unlikely(psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbTimelineCreatePVR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineCreatePVR_exit:
+
+ /* A later step failed after creation: drop the timeline reference so it
+  * is not leaked (the handle-release callback will never run). */
+ if (psSyncFbTimelineCreatePVROUT->eError != PVRSRV_OK)
+ {
+ if (psTimelineInt)
+ {
+ SyncFbTimelineRelease(psTimelineInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psSyncFbTimelineCreatePVROUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only heap-allocated staging buffers are freed; the in-buffer tail is not. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge entry point for SyncFbTimelineRelease: destroys the per-process
+ * timeline handle, which triggers the release callback registered at
+ * creation. PVRSRV_ERROR_RETRY and PVRSRV_ERROR_KERNEL_CCB_FULL are
+ * returned without logging (presumably the caller retries; confirm).
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineRelease(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbTimelineReleaseIN_UI8,
+ IMG_UINT8 * psSyncFbTimelineReleaseOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE *psSyncFbTimelineReleaseIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBTIMELINERELEASE *)
+ IMG_OFFSET_ADDR(psSyncFbTimelineReleaseIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE *psSyncFbTimelineReleaseOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINERELEASE *)
+ IMG_OFFSET_ADDR(psSyncFbTimelineReleaseOUT_UI8, 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbTimelineReleaseOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (IMG_HANDLE) psSyncFbTimelineReleaseIN->hTimeline,
+ PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+ if (unlikely((psSyncFbTimelineReleaseOUT->eError != PVRSRV_OK) &&
+ (psSyncFbTimelineReleaseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psSyncFbTimelineReleaseOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psSyncFbTimelineReleaseOUT->eError)));
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbTimelineRelease_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineRelease_exit:
+
+ return 0;
+}
+
+/* Handle-release callback: drops the duplicated fence reference when the
+ * handle allocated by PVRSRVBridgeSyncFbFenceDup is destroyed. */
+static PVRSRV_ERROR _SyncFbFenceDuppsOutFenceIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+ return eError;
+}
+
+/*
+ * Bridge entry point for SyncFbFenceDup: looks up the input fence handle,
+ * duplicates the server fence and returns a new handle for the duplicate
+ * (PVRSRV_HANDLE_ALLOC_FLAG_MULTI - presumably permitting multiple handles
+ * per object; confirm in handle.h). The looked-up input handle reference
+ * is always dropped in the exit path; the duplicate is released only if a
+ * later step failed.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceDup(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbFenceDupIN_UI8,
+ IMG_UINT8 * psSyncFbFenceDupOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP *psSyncFbFenceDupIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBFENCEDUP *) IMG_OFFSET_ADDR(psSyncFbFenceDupIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP *psSyncFbFenceDupOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUP *) IMG_OFFSET_ADDR(psSyncFbFenceDupOUT_UI8, 0);
+
+ IMG_HANDLE hInFence = psSyncFbFenceDupIN->hInFence;
+ PVRSRV_FENCE_SERVER *psInFenceInt = NULL;
+ PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncFbFenceDupOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **)&psInFenceInt,
+ hInFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+ if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceDup_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceDupOUT->eError = SyncFbFenceDup(psInFenceInt, &psOutFenceInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+ {
+ goto SyncFbFenceDup_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceDupOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &psSyncFbFenceDupOUT->hOutFence, (void *)psOutFenceInt,
+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) & _SyncFbFenceDuppsOutFenceIntRelease);
+ if (unlikely(psSyncFbFenceDupOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceDup_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceDup_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psInFenceInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hInFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* A later step failed after duplication: drop the new fence reference. */
+ if (psSyncFbFenceDupOUT->eError != PVRSRV_OK)
+ {
+ if (psOutFenceInt)
+ {
+ SyncFbFenceRelease(psOutFenceInt);
+ }
+ }
+
+ return 0;
+}
+
+/* Handle-release callback: drops the merged fence reference when the
+ * handle allocated by PVRSRVBridgeSyncFbFenceMerge is destroyed. */
+static PVRSRV_ERROR _SyncFbFenceMergepsOutFenceIntRelease(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+ return eError;
+}
+
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+ "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge entry point for SyncFbFenceMerge.
+ *
+ * Copies the (bounded) output-fence name from user space, looks up both
+ * input fence handles, merges them into a new server fence and returns a
+ * handle for it. Both looked-up references are dropped in the exit path;
+ * the merged fence is released only if a later step failed. The IMG_INT
+ * return is always 0; the status travels in psSyncFbFenceMergeOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceMerge(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbFenceMergeIN_UI8,
+ IMG_UINT8 * psSyncFbFenceMergeOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE *psSyncFbFenceMergeIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBFENCEMERGE *) IMG_OFFSET_ADDR(psSyncFbFenceMergeIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE *psSyncFbFenceMergeOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBFENCEMERGE *) IMG_OFFSET_ADDR(psSyncFbFenceMergeOUT_UI8, 0);
+
+ IMG_HANDLE hInFence1 = psSyncFbFenceMergeIN->hInFence1;
+ PVRSRV_FENCE_SERVER *psInFence1Int = NULL;
+ IMG_HANDLE hInFence2 = psSyncFbFenceMergeIN->hInFence2;
+ PVRSRV_FENCE_SERVER *psInFence2Int = NULL;
+ IMG_CHAR *uiFenceNameInt = NULL;
+ PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ /* 64-bit accumulation guards against 32-bit overflow of the name size. */
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) + 0;
+
+ /* Reject names longer than the server-side maximum before any copying. */
+ if (unlikely(psSyncFbFenceMergeIN->ui32FenceNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+ {
+ psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncFbFenceMerge_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto SyncFbFenceMerge_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psSyncFbFenceMergeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceMergeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ /* Input buffer tail too small: heap-allocate, freed at exit. */
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncFbFenceMerge_exit;
+ }
+ }
+ }
+
+ /* Carve the name array out of the staging buffer. */
+ if (psSyncFbFenceMergeIN->ui32FenceNameSize != 0)
+ {
+ uiFenceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFenceNameInt, (const void __user *)psSyncFbFenceMergeIN->puiFenceName,
+ psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psSyncFbFenceMergeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncFbFenceMerge_exit;
+ }
+ /* Defensively force NUL termination of the user-supplied name. */
+ ((IMG_CHAR *)
+ uiFenceNameInt)[(psSyncFbFenceMergeIN->ui32FenceNameSize * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncFbFenceMergeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **)&psInFence1Int,
+ hInFence1, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+ if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceMerge_exit;
+ }
+
+ /* Look up the address from the handle */
+ psSyncFbFenceMergeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **)&psInFence2Int,
+ hInFence2, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+ if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceMerge_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceMergeOUT->eError =
+ SyncFbFenceMerge(psInFence1Int,
+ psInFence2Int,
+ psSyncFbFenceMergeIN->ui32FenceNameSize,
+ uiFenceNameInt, &psOutFenceInt);
+ /* Exit early if bridged call fails */
+ if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+ {
+ goto SyncFbFenceMerge_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceMergeOUT->eError =
+ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &psSyncFbFenceMergeOUT->hOutFence, (void *)psOutFenceInt,
+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE) & _SyncFbFenceMergepsOutFenceIntRelease);
+ if (unlikely(psSyncFbFenceMergeOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceMerge_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceMerge_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psInFence1Int)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hInFence1, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ }
+
+ /* Unreference the previously looked up handle */
+ if (psInFence2Int)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hInFence2, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* A later step failed after the merge: drop the new fence reference. */
+ if (psSyncFbFenceMergeOUT->eError != PVRSRV_OK)
+ {
+ if (psOutFenceInt)
+ {
+ SyncFbFenceRelease(psOutFenceInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psSyncFbFenceMergeOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ /* Only heap-allocated staging buffers are freed; the in-buffer tail is not. */
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/*
+ * Bridge entry point for SyncFbFenceRelease: destroys the per-process
+ * fence handle, which triggers the release callback registered at handle
+ * creation. RETRY / KERNEL_CCB_FULL results are returned without logging
+ * (presumably retried by the caller; confirm).
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceRelease(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbFenceReleaseIN_UI8,
+ IMG_UINT8 * psSyncFbFenceReleaseOUT_UI8,
+ CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE *psSyncFbFenceReleaseIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBFENCERELEASE *) IMG_OFFSET_ADDR(psSyncFbFenceReleaseIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE *psSyncFbFenceReleaseOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBFENCERELEASE *) IMG_OFFSET_ADDR(psSyncFbFenceReleaseOUT_UI8,
+ 0);
+
+ /* Lock over handle destruction. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceReleaseOUT->eError =
+ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (IMG_HANDLE) psSyncFbFenceReleaseIN->hFence,
+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ if (unlikely((psSyncFbFenceReleaseOUT->eError != PVRSRV_OK) &&
+ (psSyncFbFenceReleaseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+ (psSyncFbFenceReleaseOUT->eError != PVRSRV_ERROR_RETRY)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s",
+ __func__, PVRSRVGetErrorString(psSyncFbFenceReleaseOUT->eError)));
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceRelease_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceRelease_exit:
+
+ return 0;
+}
+
+/*
+ * Bridge entry point for SyncFbFenceWait: looks up the fence handle and
+ * waits on it via SyncFbFenceWait() with the caller-supplied ui32Timeout
+ * (units not visible here - presumably milliseconds; confirm against
+ * pvrsrv_sync_server.h). The looked-up reference is always dropped in the
+ * exit path, whether or not the wait succeeded.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceWait(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbFenceWaitIN_UI8,
+ IMG_UINT8 * psSyncFbFenceWaitOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT *psSyncFbFenceWaitIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBFENCEWAIT *) IMG_OFFSET_ADDR(psSyncFbFenceWaitIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT *psSyncFbFenceWaitOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBFENCEWAIT *) IMG_OFFSET_ADDR(psSyncFbFenceWaitOUT_UI8, 0);
+
+ IMG_HANDLE hFence = psSyncFbFenceWaitIN->hFence;
+ PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncFbFenceWaitOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **)&psFenceInt,
+ hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+ if (unlikely(psSyncFbFenceWaitOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceWait_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceWaitOUT->eError =
+ SyncFbFenceWait(psFenceInt, psSyncFbFenceWaitIN->ui32Timeout);
+
+SyncFbFenceWait_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psFenceInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ return 0;
+}
+
+static_assert((SYNC_FB_FILE_STRING_MAX + 1) <= IMG_UINT32_MAX,
+ "(SYNC_FB_FILE_STRING_MAX+1) must not be larger than IMG_UINT32_MAX");
+static_assert((SYNC_FB_MODULE_STRING_LEN_MAX + 1) <= IMG_UINT32_MAX,
+ "(SYNC_FB_MODULE_STRING_LEN_MAX+1) must not be larger than IMG_UINT32_MAX");
+static_assert((SYNC_FB_DESC_STRING_LEN_MAX + 1) <= IMG_UINT32_MAX,
+ "(SYNC_FB_DESC_STRING_LEN_MAX+1) must not be larger than IMG_UINT32_MAX");
+
+static IMG_INT
+PVRSRVBridgeSyncFbFenceDump(IMG_UINT32 ui32DispatchTableEntry,
+ IMG_UINT8 * psSyncFbFenceDumpIN_UI8,
+ IMG_UINT8 * psSyncFbFenceDumpOUT_UI8, CONNECTION_DATA * psConnection)
+{
+ PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP *psSyncFbFenceDumpIN =
+ (PVRSRV_BRIDGE_IN_SYNCFBFENCEDUMP *) IMG_OFFSET_ADDR(psSyncFbFenceDumpIN_UI8, 0);
+ PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP *psSyncFbFenceDumpOUT =
+ (PVRSRV_BRIDGE_OUT_SYNCFBFENCEDUMP *) IMG_OFFSET_ADDR(psSyncFbFenceDumpOUT_UI8, 0);
+
+ IMG_HANDLE hFence = psSyncFbFenceDumpIN->hFence;
+ PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+ IMG_CHAR *uiFileStrInt = NULL;
+ IMG_CHAR *uiModuleStrInt = NULL;
+ IMG_CHAR *uiDescStrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+ IMG_UINT32 ui32BufferSize = 0;
+ IMG_UINT64 ui64BufferSize =
+ ((IMG_UINT64) psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) +
+ ((IMG_UINT64) psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) + 0;
+
+ if (unlikely(psSyncFbFenceDumpIN->ui32FileStrLength > (SYNC_FB_FILE_STRING_MAX + 1)))
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncFbFenceDump_exit;
+ }
+
+ if (unlikely
+ (psSyncFbFenceDumpIN->ui32ModuleStrLength > (SYNC_FB_MODULE_STRING_LEN_MAX + 1)))
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncFbFenceDump_exit;
+ }
+
+ if (unlikely(psSyncFbFenceDumpIN->ui32DescStrLength > (SYNC_FB_DESC_STRING_LEN_MAX + 1)))
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+ goto SyncFbFenceDump_exit;
+ }
+
+ if (ui64BufferSize > IMG_UINT32_MAX)
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+ goto SyncFbFenceDump_exit;
+ }
+
+ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+ if (ui32BufferSize != 0)
+ {
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset =
+ PVR_ALIGN(sizeof(*psSyncFbFenceDumpIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize =
+ ui32InBufferOffset >=
+ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceDumpIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if (!pArrayArgsBuffer)
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncFbFenceDump_exit;
+ }
+ }
+ }
+
+ if (psSyncFbFenceDumpIN->ui32FileStrLength != 0)
+ {
+ uiFileStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiFileStrInt, (const void __user *)psSyncFbFenceDumpIN->puiFileStr,
+ psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncFbFenceDump_exit;
+ }
+ ((IMG_CHAR *)
+ uiFileStrInt)[(psSyncFbFenceDumpIN->ui32FileStrLength * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+ if (psSyncFbFenceDumpIN->ui32ModuleStrLength != 0)
+ {
+ uiModuleStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiModuleStrInt, (const void __user *)psSyncFbFenceDumpIN->puiModuleStr,
+ psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncFbFenceDump_exit;
+ }
+ ((IMG_CHAR *)
+ uiModuleStrInt)[(psSyncFbFenceDumpIN->ui32ModuleStrLength * sizeof(IMG_CHAR)) -
+ 1] = '\0';
+ }
+ if (psSyncFbFenceDumpIN->ui32DescStrLength != 0)
+ {
+ uiDescStrInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+ ui32NextOffset += psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR) > 0)
+ {
+ if (OSCopyFromUser
+ (NULL, uiDescStrInt, (const void __user *)psSyncFbFenceDumpIN->puiDescStr,
+ psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) != PVRSRV_OK)
+ {
+ psSyncFbFenceDumpOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncFbFenceDump_exit;
+ }
+ ((IMG_CHAR *)
+ uiDescStrInt)[(psSyncFbFenceDumpIN->ui32DescStrLength * sizeof(IMG_CHAR)) - 1] =
+ '\0';
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Look up the address from the handle */
+ psSyncFbFenceDumpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **)&psFenceInt,
+ hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+ if (unlikely(psSyncFbFenceDumpOUT->eError != PVRSRV_OK))
+ {
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+ goto SyncFbFenceDump_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ psSyncFbFenceDumpOUT->eError =
+ SyncFbFenceDump(psFenceInt,
+ psSyncFbFenceDumpIN->ui32Line,
+ psSyncFbFenceDumpIN->ui32FileStrLength,
+ uiFileStrInt,
+ psSyncFbFenceDumpIN->ui32ModuleStrLength,
+ uiModuleStrInt, psSyncFbFenceDumpIN->ui32DescStrLength, uiDescStrInt);
+
+SyncFbFenceDump_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Unreference the previously looked up handle */
+ if (psFenceInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+ /* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ if (psSyncFbFenceDumpOUT->eError == PVRSRV_OK)
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+ if (!bHaveEnoughSpace && pArrayArgsBuffer)
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ return 0;
+}
+
+/* Handle-release shim registered as the PFN_HANDLE_RELEASE callback for SW
+ * timeline handles: forwards the handle manager's opaque payload to
+ * SyncFbTimelineRelease. */
+static PVRSRV_ERROR _SyncFbTimelineCreateSWpsTimelineIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = SyncFbTimelineRelease((PVRSRV_TIMELINE_SERVER *) pvData);
+	return eError;
+}
+
+/* The bridge marshals array lengths as IMG_UINT32, so the configured maximum
+ * length must itself fit in 32 bits. */
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+	      "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge dispatcher for SyncFbTimelineCreateSW.
+ *
+ * Validates the user-supplied timeline-name length, stages the name either in
+ * the spare tail of the bridge input buffer or in a temporary heap allocation,
+ * copies it from user space (forcing NUL termination), creates the SW timeline
+ * and returns it to the caller as a per-process handle.  Always returns 0; the
+ * PVRSRV_ERROR status travels back in psSyncFbTimelineCreateSWOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineCreateSW(IMG_UINT32 ui32DispatchTableEntry,
+				   IMG_UINT8 * psSyncFbTimelineCreateSWIN_UI8,
+				   IMG_UINT8 * psSyncFbTimelineCreateSWOUT_UI8,
+				   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW *psSyncFbTimelineCreateSWIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBTIMELINECREATESW *)
+	    IMG_OFFSET_ADDR(psSyncFbTimelineCreateSWIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW *psSyncFbTimelineCreateSWOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINECREATESW *)
+	    IMG_OFFSET_ADDR(psSyncFbTimelineCreateSWOUT_UI8, 0);
+
+	IMG_CHAR *uiTimelineNameInt = NULL;
+	PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	/* Total size is computed in 64 bits so oversized client values cannot
+	 * overflow before the range check below. */
+	IMG_UINT32 ui32BufferSize = 0;
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) + 0;
+
+	/* Reject names longer than the compile-time maximum. */
+	if (unlikely(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+	{
+		psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncFbTimelineCreateSW_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto SyncFbTimelineCreateSW_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncFbTimelineCreateSWIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbTimelineCreateSWIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncFbTimelineCreateSW_exit;
+			}
+		}
+	}
+
+	if (psSyncFbTimelineCreateSWIN->ui32TimelineNameSize != 0)
+	{
+		uiTimelineNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset +=
+		    psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiTimelineNameInt,
+		     (const void __user *)psSyncFbTimelineCreateSWIN->puiTimelineName,
+		     psSyncFbTimelineCreateSWIN->ui32TimelineNameSize * sizeof(IMG_CHAR)) !=
+		    PVRSRV_OK)
+		{
+			psSyncFbTimelineCreateSWOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncFbTimelineCreateSW_exit;
+		}
+		/* Force NUL termination of the copied-in name regardless of
+		 * what user space supplied. */
+		((IMG_CHAR *)
+		 uiTimelineNameInt)[(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize *
+				    sizeof(IMG_CHAR)) - 1] = '\0';
+	}
+
+	psSyncFbTimelineCreateSWOUT->eError =
+	    SyncFbTimelineCreateSW(psSyncFbTimelineCreateSWIN->ui32TimelineNameSize,
+				   uiTimelineNameInt, &psTimelineInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbTimelineCreateSW_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbTimelineCreateSWOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				      &psSyncFbTimelineCreateSWOUT->hTimeline,
+				      (void *)psTimelineInt,
+				      PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      _SyncFbTimelineCreateSWpsTimelineIntRelease);
+	if (unlikely(psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbTimelineCreateSW_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbTimelineCreateSW_exit:
+
+	/* On any failure after creation, drop the timeline reference the
+	 * server call handed back (the handle release callback will not run). */
+	if (psSyncFbTimelineCreateSWOUT->eError != PVRSRV_OK)
+	{
+		if (psTimelineInt)
+		{
+			SyncFbTimelineRelease(psTimelineInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psSyncFbTimelineCreateSWOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when the scratch buffer was heap-allocated, not when it
+	 * aliased the tail of the bridge input buffer. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* Handle-release shim registered as the PFN_HANDLE_RELEASE callback for SW
+ * fence handles: forwards the handle manager's opaque payload to
+ * SyncFbFenceRelease. */
+static PVRSRV_ERROR _SyncFbFenceCreateSWpsOutFenceIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+	return eError;
+}
+
+/* The bridge marshals array lengths as IMG_UINT32, so the configured maximum
+ * length must itself fit in 32 bits. */
+static_assert(SYNC_FB_FENCE_MAX_LENGTH <= IMG_UINT32_MAX,
+	      "SYNC_FB_FENCE_MAX_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/*
+ * Bridge dispatcher for SyncFbFenceCreateSW.
+ *
+ * Copies the fence name in from user space (forcing NUL termination), looks
+ * up the target timeline handle, creates a SW fence on it, and returns the
+ * new fence as a per-process handle plus its sync-point index.  Always
+ * returns 0; the PVRSRV_ERROR status travels back in
+ * psSyncFbFenceCreateSWOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceCreateSW(IMG_UINT32 ui32DispatchTableEntry,
+				IMG_UINT8 * psSyncFbFenceCreateSWIN_UI8,
+				IMG_UINT8 * psSyncFbFenceCreateSWOUT_UI8,
+				CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW *psSyncFbFenceCreateSWIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCECREATESW *) IMG_OFFSET_ADDR(psSyncFbFenceCreateSWIN_UI8,
+								     0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW *psSyncFbFenceCreateSWOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCECREATESW *) IMG_OFFSET_ADDR(psSyncFbFenceCreateSWOUT_UI8,
+								      0);
+
+	IMG_HANDLE hTimeline = psSyncFbFenceCreateSWIN->hTimeline;
+	PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+	IMG_CHAR *uiFenceNameInt = NULL;
+	PVRSRV_FENCE_SERVER *psOutFenceInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	/* Total size is computed in 64 bits so oversized client values cannot
+	 * overflow before the range check below. */
+	IMG_UINT32 ui32BufferSize = 0;
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) + 0;
+
+	/* Reject names longer than the compile-time maximum. */
+	if (unlikely(psSyncFbFenceCreateSWIN->ui32FenceNameSize > SYNC_FB_FENCE_MAX_LENGTH))
+	{
+		psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncFbFenceCreateSW_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto SyncFbFenceCreateSW_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncFbFenceCreateSWIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncFbFenceCreateSWIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncFbFenceCreateSW_exit;
+			}
+		}
+	}
+
+	if (psSyncFbFenceCreateSWIN->ui32FenceNameSize != 0)
+	{
+		uiFenceNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiFenceNameInt,
+		     (const void __user *)psSyncFbFenceCreateSWIN->puiFenceName,
+		     psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psSyncFbFenceCreateSWOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncFbFenceCreateSW_exit;
+		}
+		/* Force NUL termination of the copied-in name regardless of
+		 * what user space supplied. */
+		((IMG_CHAR *)
+		 uiFenceNameInt)[(psSyncFbFenceCreateSWIN->ui32FenceNameSize * sizeof(IMG_CHAR)) -
+				 1] = '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncFbFenceCreateSWOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				       (void **)&psTimelineInt,
+				       hTimeline,
+				       PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, IMG_TRUE);
+	if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceCreateSW_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceCreateSWOUT->eError =
+	    SyncFbFenceCreateSW(psConnection, OSGetDevNode(psConnection),
+				psTimelineInt,
+				psSyncFbFenceCreateSWIN->ui32FenceNameSize,
+				uiFenceNameInt,
+				&psOutFenceInt, &psSyncFbFenceCreateSWOUT->ui64SyncPtIdx);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbFenceCreateSW_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceCreateSWOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				      &psSyncFbFenceCreateSWOUT->hOutFence, (void *)psOutFenceInt,
+				      PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      _SyncFbFenceCreateSWpsOutFenceIntRelease);
+	if (unlikely(psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceCreateSW_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceCreateSW_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTimelineInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					    hTimeline, PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* On any failure after creation, drop the fence reference the server
+	 * call handed back (the handle release callback will not run). */
+	if (psSyncFbFenceCreateSWOUT->eError != PVRSRV_OK)
+	{
+		if (psOutFenceInt)
+		{
+			SyncFbFenceRelease(psOutFenceInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psSyncFbFenceCreateSWOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	/* Only free when the scratch buffer was heap-allocated, not when it
+	 * aliased the tail of the bridge input buffer. */
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for SyncFbTimelineAdvanceSW.
+ *
+ * Looks up the SW timeline from its per-process handle, advances it via the
+ * server implementation and reports the resulting sync-point index back to
+ * the caller.  Always returns 0; the PVRSRV_ERROR status travels back in
+ * psSyncFbTimelineAdvanceSWOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbTimelineAdvanceSW(IMG_UINT32 ui32DispatchTableEntry,
+				    IMG_UINT8 * psSyncFbTimelineAdvanceSWIN_UI8,
+				    IMG_UINT8 * psSyncFbTimelineAdvanceSWOUT_UI8,
+				    CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW *psSyncFbTimelineAdvanceSWIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBTIMELINEADVANCESW *)
+	    IMG_OFFSET_ADDR(psSyncFbTimelineAdvanceSWIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW *psSyncFbTimelineAdvanceSWOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBTIMELINEADVANCESW *)
+	    IMG_OFFSET_ADDR(psSyncFbTimelineAdvanceSWOUT_UI8, 0);
+
+	IMG_HANDLE hTimeline = psSyncFbTimelineAdvanceSWIN->hTimeline;
+	PVRSRV_TIMELINE_SERVER *psTimelineInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncFbTimelineAdvanceSWOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				       (void **)&psTimelineInt,
+				       hTimeline,
+				       PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER, IMG_TRUE);
+	if (unlikely(psSyncFbTimelineAdvanceSWOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbTimelineAdvanceSW_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbTimelineAdvanceSWOUT->eError =
+	    SyncFbTimelineAdvanceSW(psTimelineInt, &psSyncFbTimelineAdvanceSWOUT->ui64SyncPtIdx);
+
+SyncFbTimelineAdvanceSW_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psTimelineInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					    hTimeline, PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	return 0;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/* Handle-release shim registered as the PFN_HANDLE_RELEASE callback for the
+ * per-process (non-returned) export handle: forwards the handle manager's
+ * opaque payload to SyncFbFenceExportDestroyInsecure. */
+static PVRSRV_ERROR _SyncFbFenceExportInsecurepsExportIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = SyncFbFenceExportDestroyInsecure((PVRSRV_FENCE_EXPORT *) pvData);
+	return eError;
+}
+
+/*
+ * Bridge dispatcher for SyncFbFenceExportInsecure.
+ *
+ * Looks up the fence, creates an insecure export object and registers it
+ * under TWO handles: a per-process handle (owns the real destructor, used
+ * for leak cleanup on abnormal client exit) and a kernel/global handle
+ * (no-op destructor) which is the one returned to the caller.  Always
+ * returns 0; the PVRSRV_ERROR status travels back in
+ * psSyncFbFenceExportInsecureOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportInsecure(IMG_UINT32 ui32DispatchTableEntry,
+				      IMG_UINT8 * psSyncFbFenceExportInsecureIN_UI8,
+				      IMG_UINT8 * psSyncFbFenceExportInsecureOUT_UI8,
+				      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE *psSyncFbFenceExportInsecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportInsecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE *psSyncFbFenceExportInsecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportInsecureOUT_UI8, 0);
+
+	IMG_HANDLE hFence = psSyncFbFenceExportInsecureIN->hFence;
+	PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+	PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+	IMG_HANDLE hExportInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncFbFenceExportInsecureOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				       (void **)&psFenceInt,
+				       hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+	if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceExportInsecure_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceExportInsecureOUT->eError =
+	    SyncFbFenceExportInsecure(psFenceInt, &psExportInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbFenceExportInsecure_exit;
+	}
+
+	/*
+	 * For cases where we need a cross process handle we actually allocate two.
+	 *
+	 * The first one is a connection specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. It's
+	 * purpose is to release any leaked resources when we either have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross process handle and then
+	 * released accordingly.
+	 *
+	 * The second one is a cross process handle and it gets given a noop release
+	 * function. This handle does get returned to the caller.
+	 */
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceExportInsecureOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, &hExportInt,
+				      (void *)psExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+				      PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+				      (PFN_HANDLE_RELEASE) &
+				      _SyncFbFenceExportInsecurepsExportIntRelease);
+	if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceExportInsecure_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Lock over handle creation. */
+	LockHandle(KERNEL_HANDLE_BASE);
+	psSyncFbFenceExportInsecureOUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+									   &psSyncFbFenceExportInsecureOUT->
+									   hExport,
+									   (void *)psExportInt,
+									   PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+									   PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+									   (PFN_HANDLE_RELEASE) &
+									   ReleaseExport);
+	if (unlikely(psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto SyncFbFenceExportInsecure_exit;
+	}
+	/* Release now we have created handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+SyncFbFenceExportInsecure_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psFenceInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					    hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Failure path: unwind whichever of the two handles were created,
+	 * destroying the export object at most once. */
+	if (psSyncFbFenceExportInsecureOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncFbFenceExportInsecureOUT->hExport)
+		{
+			PVRSRV_ERROR eError;
+
+			/* Lock over handle creation cleanup. */
+			LockHandle(KERNEL_HANDLE_BASE);
+
+			eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE,
+							     (IMG_HANDLE)
+							     psSyncFbFenceExportInsecureOUT->
+							     hExport,
+							     PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+			if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(KERNEL_HANDLE_BASE);
+
+		}
+
+		if (hExportInt)
+		{
+			PVRSRV_ERROR eError;
+			/* Lock over handle creation cleanup. */
+			LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+			eError =
+			    PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase->
+							psHandleBase, hExportInt,
+							PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+			if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+					 "%s: %s", __func__, PVRSRVGetErrorString(eError)));
+			}
+			/* Releasing the handle should free/destroy/release the resource.
+			 * This should never fail... */
+			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+			/* Avoid freeing/destroying/releasing the resource a second time below */
+			psExportInt = NULL;
+			/* Release now we have cleaned up creation handles. */
+			UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		}
+
+		if (psExportInt)
+		{
+			SyncFbFenceExportDestroyInsecure(psExportInt);
+		}
+	}
+
+	return 0;
+}
+
+#else
+/* Insecure export disabled at build time: no dispatch entry. */
+#define PVRSRVBridgeSyncFbFenceExportInsecure NULL
+#endif
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+/*
+ * Bridge dispatcher for SyncFbFenceExportDestroyInsecure.
+ *
+ * Tears down both handles created by PVRSRVBridgeSyncFbFenceExportInsecure:
+ * it resolves the kernel/global export handle back to the export object,
+ * finds the matching per-process handle (which owns the real destructor),
+ * destroys that first, then destroys the kernel/global handle itself.
+ * Always returns 0; the PVRSRV_ERROR status travels back in
+ * psSyncFbFenceExportDestroyInsecureOUT->eError.
+ *
+ * Fix: the generated PVR_UNREFERENCED_PARAMETER(psConnection) has been
+ * removed - psConnection is dereferenced below for the per-process handle
+ * base, so the annotation was contradictory and misleading.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportDestroyInsecure(IMG_UINT32 ui32DispatchTableEntry,
+					     IMG_UINT8 * psSyncFbFenceExportDestroyInsecureIN_UI8,
+					     IMG_UINT8 * psSyncFbFenceExportDestroyInsecureOUT_UI8,
+					     CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE *psSyncFbFenceExportDestroyInsecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportDestroyInsecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE *psSyncFbFenceExportDestroyInsecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportDestroyInsecureOUT_UI8, 0);
+
+	PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+	IMG_HANDLE hExportInt = NULL;
+
+	/* Lock over handle destruction. */
+	LockHandle(KERNEL_HANDLE_BASE);
+	psSyncFbFenceExportDestroyInsecureOUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psExportInt,
+				       (IMG_HANDLE) psSyncFbFenceExportDestroyInsecureIN->hExport,
+				       PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, IMG_FALSE);
+	/* NOTE(review): on lookup failure this generated code logs and
+	 * continues with psExportInt == NULL rather than exiting early;
+	 * the subsequent Find/Destroy calls are then expected to fail
+	 * gracefully - confirm against the handle manager's contract. */
+	if (unlikely(psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__,
+			 PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+	}
+	PVR_ASSERT(psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK);
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+	/*
+	 * Find the connection specific handle that represents the same data
+	 * as the cross process handle as releasing it will actually call the
+	 * data's real release function (see the function where the cross
+	 * process handle is allocated for more details).
+	 */
+	psSyncFbFenceExportDestroyInsecureOUT->eError =
+	    PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				     &hExportInt,
+				     psExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+	if (unlikely(psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__,
+			 PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+	}
+	PVR_ASSERT(psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK);
+
+	psSyncFbFenceExportDestroyInsecureOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					      hExportInt, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+	if (unlikely((psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK) &&
+		     (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+		     && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__,
+			 PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+	}
+	PVR_ASSERT((psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_OK) ||
+		   (psSyncFbFenceExportDestroyInsecureOUT->eError == PVRSRV_ERROR_RETRY));
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Lock over handle destruction. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	psSyncFbFenceExportDestroyInsecureOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE,
+					      (IMG_HANDLE) psSyncFbFenceExportDestroyInsecureIN->
+					      hExport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+	if (unlikely
+	    ((psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_OK)
+	     && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+	     && (psSyncFbFenceExportDestroyInsecureOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__,
+			 PVRSRVGetErrorString(psSyncFbFenceExportDestroyInsecureOUT->eError)));
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto SyncFbFenceExportDestroyInsecure_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+SyncFbFenceExportDestroyInsecure_exit:
+
+	return 0;
+}
+
+#else
+/* Insecure export disabled at build time: no dispatch entry. */
+#define PVRSRVBridgeSyncFbFenceExportDestroyInsecure NULL
+#endif
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/* Handle-release shim registered as the PFN_HANDLE_RELEASE callback for the
+ * imported fence handle: forwards the handle manager's opaque payload to
+ * SyncFbFenceRelease. */
+static PVRSRV_ERROR _SyncFbFenceImportInsecurepsSyncHandleIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+	return eError;
+}
+
+/*
+ * Bridge dispatcher for SyncFbFenceImportInsecure.
+ *
+ * Resolves the export object from the kernel/global handle namespace,
+ * imports it into this connection via the server implementation and hands
+ * the resulting fence back to the caller as a per-process handle.  Always
+ * returns 0; the PVRSRV_ERROR status travels back in
+ * psSyncFbFenceImportInsecureOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceImportInsecure(IMG_UINT32 ui32DispatchTableEntry,
+				      IMG_UINT8 * psSyncFbFenceImportInsecureIN_UI8,
+				      IMG_UINT8 * psSyncFbFenceImportInsecureOUT_UI8,
+				      CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE *psSyncFbFenceImportInsecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceImportInsecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE *psSyncFbFenceImportInsecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTINSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceImportInsecureOUT_UI8, 0);
+
+	IMG_HANDLE hImport = psSyncFbFenceImportInsecureIN->hImport;
+	PVRSRV_FENCE_EXPORT *psImportInt = NULL;
+	PVRSRV_FENCE_SERVER *psSyncHandleInt = NULL;
+
+	/* Lock over handle lookup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Look up the address from the handle */
+	psSyncFbFenceImportInsecureOUT->eError =
+	    PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+				       (void **)&psImportInt,
+				       hImport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT, IMG_TRUE);
+	if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(KERNEL_HANDLE_BASE);
+		goto SyncFbFenceImportInsecure_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	psSyncFbFenceImportInsecureOUT->eError =
+	    SyncFbFenceImportInsecure(psConnection, OSGetDevNode(psConnection),
+				      psImportInt, &psSyncHandleInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbFenceImportInsecure_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceImportInsecureOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				      &psSyncFbFenceImportInsecureOUT->hSyncHandle,
+				      (void *)psSyncHandleInt,
+				      PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      _SyncFbFenceImportInsecurepsSyncHandleIntRelease);
+	if (unlikely(psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceImportInsecure_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceImportInsecure_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(KERNEL_HANDLE_BASE);
+
+	/* Unreference the previously looked up handle */
+	if (psImportInt)
+	{
+		PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					    hImport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(KERNEL_HANDLE_BASE);
+
+	/* On any failure after import, drop the fence reference the server
+	 * call handed back (the handle release callback will not run). */
+	if (psSyncFbFenceImportInsecureOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			SyncFbFenceRelease(psSyncHandleInt);
+		}
+	}
+
+	return 0;
+}
+
+#else
+/* Insecure export disabled at build time: no dispatch entry. */
+#define PVRSRVBridgeSyncFbFenceImportInsecure NULL
+#endif
+
+/*
+ * Bridge dispatcher for SyncFbFenceExportSecure.
+ *
+ * Looks up the fence from its per-process handle and asks the server to
+ * produce a secure export token, which is returned to the caller in
+ * psSyncFbFenceExportSecureOUT->Export.  Always returns 0; the PVRSRV_ERROR
+ * status travels back in psSyncFbFenceExportSecureOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportSecure(IMG_UINT32 ui32DispatchTableEntry,
+				    IMG_UINT8 * psSyncFbFenceExportSecureIN_UI8,
+				    IMG_UINT8 * psSyncFbFenceExportSecureOUT_UI8,
+				    CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE *psSyncFbFenceExportSecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportSecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE *psSyncFbFenceExportSecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportSecureOUT_UI8, 0);
+
+	IMG_HANDLE hFence = psSyncFbFenceExportSecureIN->hFence;
+	PVRSRV_FENCE_SERVER *psFenceInt = NULL;
+	PVRSRV_FENCE_EXPORT *psExportInt = NULL;
+	/* NOTE(review): the secure connection out-parameter is filled by the
+	 * server call but not otherwise used in this dispatcher (generated
+	 * code) - presumably its lifetime is owned by the export token. */
+	CONNECTION_DATA *psSecureConnection;
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncFbFenceExportSecureOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				       (void **)&psFenceInt,
+				       hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER, IMG_TRUE);
+	if (unlikely(psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceExportSecure_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	psSyncFbFenceExportSecureOUT->eError =
+	    SyncFbFenceExportSecure(psConnection, OSGetDevNode(psConnection),
+				    psFenceInt,
+				    &psSyncFbFenceExportSecureOUT->Export,
+				    &psExportInt, &psSecureConnection);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbFenceExportSecure_exit;
+	}
+
+SyncFbFenceExportSecure_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (psFenceInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					    hFence, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* On failure, destroy any export object the server call created. */
+	if (psSyncFbFenceExportSecureOUT->eError != PVRSRV_OK)
+	{
+		if (psExportInt)
+		{
+			SyncFbFenceExportDestroySecure(psExportInt);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Bridge dispatcher for SyncFbFenceExportDestroySecure.
+ *
+ * Destroys the secure export handle via the handle manager's staged-destroy
+ * path.  Note this uses the per-connection handle base
+ * (psConnection->psHandleBase), unlike the SW-fence entry points above which
+ * use the per-process base.  Always returns 0; the PVRSRV_ERROR status
+ * travels back in psSyncFbFenceExportDestroySecureOUT->eError.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceExportDestroySecure(IMG_UINT32 ui32DispatchTableEntry,
+					   IMG_UINT8 * psSyncFbFenceExportDestroySecureIN_UI8,
+					   IMG_UINT8 * psSyncFbFenceExportDestroySecureOUT_UI8,
+					   CONNECTION_DATA * psConnection)
+{
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE *psSyncFbFenceExportDestroySecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEEXPORTDESTROYSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportDestroySecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE *psSyncFbFenceExportDestroySecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEEXPORTDESTROYSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceExportDestroySecureOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psSyncFbFenceExportDestroySecureOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psSyncFbFenceExportDestroySecureIN->
+					      hExport, PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT);
+	/* KERNEL_CCB_FULL and RETRY are expected transient outcomes of the
+	 * staged destroy and are not logged as errors. */
+	if (unlikely
+	    ((psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_OK)
+	     && (psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL)
+	     && (psSyncFbFenceExportDestroySecureOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__,
+			 PVRSRVGetErrorString(psSyncFbFenceExportDestroySecureOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncFbFenceExportDestroySecure_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+SyncFbFenceExportDestroySecure_exit:
+
+	return 0;
+}
+
+/* Handle-release callback registered with PVRSRVAllocHandleUnlocked for
+ * handles created by PVRSRVBridgeSyncFbFenceImportSecure: drops the server
+ * fence reference when the handle is destroyed. */
+static PVRSRV_ERROR _SyncFbFenceImportSecurepsSyncHandleIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = SyncFbFenceRelease((PVRSRV_FENCE_SERVER *) pvData);
+	return eError;
+}
+
+/* Bridge handler: SyncFbFenceImportSecure.
+ * Imports a fence from a secure export token and returns a new
+ * PVRSRV_FENCE_SERVER handle (allocated in the per-process handle base) to
+ * the caller. On any failure after the import succeeded, the imported fence
+ * is released so no reference is leaked. Always returns 0; the PVRSRV
+ * status travels in the OUT structure's eError field.
+ */
+static IMG_INT
+PVRSRVBridgeSyncFbFenceImportSecure(IMG_UINT32 ui32DispatchTableEntry,
+				    IMG_UINT8 * psSyncFbFenceImportSecureIN_UI8,
+				    IMG_UINT8 * psSyncFbFenceImportSecureOUT_UI8,
+				    CONNECTION_DATA * psConnection)
+{
+	/* Reinterpret the flat bridge byte buffers as the typed IN/OUT structs. */
+	PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE *psSyncFbFenceImportSecureIN =
+	    (PVRSRV_BRIDGE_IN_SYNCFBFENCEIMPORTSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceImportSecureIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE *psSyncFbFenceImportSecureOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCFBFENCEIMPORTSECURE *)
+	    IMG_OFFSET_ADDR(psSyncFbFenceImportSecureOUT_UI8, 0);
+
+	PVRSRV_FENCE_SERVER *psSyncHandleInt = NULL;
+
+	psSyncFbFenceImportSecureOUT->eError =
+	    SyncFbFenceImportSecure(psConnection, OSGetDevNode(psConnection),
+				    psSyncFbFenceImportSecureIN->Import, &psSyncHandleInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK))
+	{
+		goto SyncFbFenceImportSecure_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+	/* Wrap the imported fence in a process-local handle; the release
+	 * callback drops the fence reference when the handle goes away. */
+	psSyncFbFenceImportSecureOUT->eError =
+	    PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+				      &psSyncFbFenceImportSecureOUT->hSyncHandle,
+				      (void *)psSyncHandleInt,
+				      PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+				      PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+				      (PFN_HANDLE_RELEASE) &
+				      _SyncFbFenceImportSecurepsSyncHandleIntRelease);
+	if (unlikely(psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+		goto SyncFbFenceImportSecure_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psProcessHandleBase->psHandleBase);
+
+SyncFbFenceImportSecure_exit:
+
+	/* Error rollback: release the imported fence if it never made it into
+	 * a handle (otherwise the handle's release callback owns it). */
+	if (psSyncFbFenceImportSecureOUT->eError != PVRSRV_OK)
+	{
+		if (psSyncHandleInt)
+		{
+			SyncFbFenceRelease(psSyncHandleInt);
+		}
+	}
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+void DeinitSYNCFALLBACKBridge(void);
+
+/*
+ * Register all SYNCFALLBACK functions with services
+ */
+/*
+ * Register all SYNCFALLBACK functions with services
+ *
+ * Installs every SYNCFALLBACK bridge entry point into the services dispatch
+ * table, keyed by (bridge group, function id). Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR,
+			      PVRSRVBridgeSyncFbTimelineCreatePVR, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE,
+			      PVRSRVBridgeSyncFbTimelineRelease, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK, PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP,
+			      PVRSRVBridgeSyncFbFenceDup, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE,
+			      PVRSRVBridgeSyncFbFenceMerge, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE,
+			      PVRSRVBridgeSyncFbFenceRelease, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT,
+			      PVRSRVBridgeSyncFbFenceWait, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP,
+			      PVRSRVBridgeSyncFbFenceDump, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW,
+			      PVRSRVBridgeSyncFbTimelineCreateSW, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW,
+			      PVRSRVBridgeSyncFbFenceCreateSW, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW,
+			      PVRSRVBridgeSyncFbTimelineAdvanceSW, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE,
+			      PVRSRVBridgeSyncFbFenceExportInsecure, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE,
+			      PVRSRVBridgeSyncFbFenceExportDestroyInsecure, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE,
+			      PVRSRVBridgeSyncFbFenceImportInsecure, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE,
+			      PVRSRVBridgeSyncFbFenceExportSecure, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE,
+			      PVRSRVBridgeSyncFbFenceExportDestroySecure, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+			      PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE,
+			      PVRSRVBridgeSyncFbFenceImportSecure, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all syncfallback functions with services
+ */
+/*
+ * Unregister all syncfallback functions with services
+ *
+ * Mirror of InitSYNCFALLBACKBridge: removes every SYNCFALLBACK entry from
+ * the dispatch table, in the same order they were registered.
+ */
+void DeinitSYNCFALLBACKBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATEPVR);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINERELEASE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEMERGE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCERELEASE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEWAIT);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEDUMP);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINECREATESW);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCECREATESW);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBTIMELINEADVANCESW);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTINSECURE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYINSECURE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTINSECURE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTSECURE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEEXPORTDESTROYSECURE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCFALLBACK,
+				PVRSRV_BRIDGE_SYNCFALLBACK_SYNCFBFENCEIMPORTSECURE);
+
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Client bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+/* Removes a previously added sync record identified by hhRecord.
+ * hBridge is unused by the direct (server-context) implementation. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord);
+
+/* Adds a sync record for the given sync primitive block and firmware
+ * address/offset; the new record handle is returned through phhRecord.
+ * puiClassName is a name string of ui32ClassNameSize characters. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+					      IMG_HANDLE * phhRecord,
+					      IMG_HANDLE hhServerSyncPrimBlock,
+					      IMG_UINT32 ui32ui32FwBlockAddr,
+					      IMG_UINT32 ui32ui32SyncOffset,
+					      IMG_BOOL bbServerSync,
+					      IMG_UINT32 ui32ClassNameSize,
+					      const IMG_CHAR * puiClassName);
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Direct client bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the client side of the bridge for synctracking
+ which is used in calls from Server context.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+/* Direct (server-context) bridge: removes a sync record.
+ * Simply forwards the record handle to PVRSRVSyncRecordRemoveByHandleKM;
+ * hBridge is intentionally unused in the direct-call path. */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt;
+	PVR_UNREFERENCED_PARAMETER(hBridge);
+
+	pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+	eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+
+	return eError;
+}
+
+/* Direct (server-context) bridge: adds a sync record.
+ * Forwards to PVRSRVSyncRecordAddKM with a NULL connection; in this path
+ * hBridge carries the device node pointer (cast below). The resulting
+ * record handle is written to *phhRecord even on failure (it is NULL then).
+ */
+IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+					      IMG_HANDLE * phhRecord,
+					      IMG_HANDLE hhServerSyncPrimBlock,
+					      IMG_UINT32 ui32ui32FwBlockAddr,
+					      IMG_UINT32 ui32ui32SyncOffset,
+					      IMG_BOOL bbServerSync,
+					      IMG_UINT32 ui32ClassNameSize,
+					      const IMG_CHAR * puiClassName)
+{
+	PVRSRV_ERROR eError;
+	SYNC_RECORD_HANDLE pshRecordInt = NULL;
+	SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt;
+
+	pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+	eError =
+	    PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge),
+				  &pshRecordInt,
+				  pshServerSyncPrimBlockInt,
+				  ui32ui32FwBlockAddr,
+				  ui32ui32SyncOffset,
+				  bbServerSync, ui32ClassNameSize, puiClassName);
+
+	*phhRecord = pshRecordInt;
+	return eError;
+}
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+/*******************************************
+ SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	IMG_HANDLE hhRecord;	/* Handle of the sync record to remove */
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+	PVRSRV_ERROR eError;	/* Result of the removal */
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+/*******************************************
+   SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhServerSyncPrimBlock;	/* Sync primitive block handle */
+	const IMG_CHAR *puiClassName;	/* User pointer to the class name string */
+	IMG_UINT32 ui32ClassNameSize;	/* Length of the class name (chars) */
+	IMG_UINT32 ui32ui32FwBlockAddr;	/* Firmware address of the sync block */
+	IMG_UINT32 ui32ui32SyncOffset;	/* Offset of the sync within the block */
+	IMG_BOOL bbServerSync;	/* True for a server-owned sync */
+} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+	IMG_HANDLE hhRecord;	/* Handle of the newly created sync record */
+	PVRSRV_ERROR eError;	/* Result of the add operation */
+} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Server bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+/* Bridge handler: SyncRecordRemoveByHandle.
+ * Stages destruction of the caller's SYNC_RECORD handle under the
+ * connection handle-base lock. Always returns 0; the PVRSRV status is
+ * reported via the OUT structure's eError field.
+ */
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+				     IMG_UINT8 * psSyncRecordRemoveByHandleIN_UI8,
+				     IMG_UINT8 * psSyncRecordRemoveByHandleOUT_UI8,
+				     CONNECTION_DATA * psConnection)
+{
+	/* Reinterpret the flat bridge byte buffers as the typed IN/OUT structs. */
+	PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN =
+	    (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *)
+	    IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *)
+	    IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0);
+
+	/* Lock over handle destruction. */
+	LockHandle(psConnection->psHandleBase);
+
+	psSyncRecordRemoveByHandleOUT->eError =
+	    PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase,
+					      (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+					      PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+	/* KERNEL_CCB_FULL and RETRY are expected transient results and are
+	 * therefore not logged as errors. */
+	if (unlikely((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) &&
+		     (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) &&
+		     (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY)))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: %s",
+			 __func__, PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->eError)));
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordRemoveByHandle_exit;
+	}
+
+	/* Release now we have destroyed handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+SyncRecordRemoveByHandle_exit:
+
+	return 0;
+}
+
+/* Handle-release callback for record handles created by
+ * PVRSRVBridgeSyncRecordAdd: removes the sync record when the handle is
+ * destroyed. */
+static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData)
+{
+	PVRSRV_ERROR eError;
+	eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData);
+	return eError;
+}
+
+static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX,
+ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX");
+
+/* Bridge handler: SyncRecordAdd.
+ * Copies the class-name array in from user space (reusing the tail of the
+ * bridge input buffer when it is large enough, otherwise allocating),
+ * looks up the sync primitive block handle, creates the sync record, and
+ * returns a new SYNC_RECORD handle to the caller. On any failure after the
+ * record was created, the record is removed again so nothing leaks.
+ * Always returns 0; the PVRSRV status travels in the OUT structure.
+ */
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+			  IMG_UINT8 * psSyncRecordAddIN_UI8,
+			  IMG_UINT8 * psSyncRecordAddOUT_UI8, CONNECTION_DATA * psConnection)
+{
+	/* Reinterpret the flat bridge byte buffers as the typed IN/OUT structs. */
+	PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN =
+	    (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0);
+	PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT =
+	    (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0);
+
+	SYNC_RECORD_HANDLE pshRecordInt = NULL;
+	IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock;
+	SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL;
+	IMG_CHAR *uiClassNameInt = NULL;
+
+	IMG_UINT32 ui32NextOffset = 0;
+	IMG_BYTE *pArrayArgsBuffer = NULL;
+	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+
+	IMG_UINT32 ui32BufferSize = 0;
+	/* Size computed in 64 bits so an attacker-supplied count cannot
+	 * overflow 32-bit arithmetic before the range checks below. */
+	IMG_UINT64 ui64BufferSize =
+	    ((IMG_UINT64) psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0;
+
+	/* Reject class names longer than the defined maximum. */
+	if (unlikely(psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH))
+	{
+		psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
+		goto SyncRecordAdd_exit;
+	}
+
+	if (ui64BufferSize > IMG_UINT32_MAX)
+	{
+		psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
+		goto SyncRecordAdd_exit;
+	}
+
+	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
+
+	if (ui32BufferSize != 0)
+	{
+		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+		IMG_UINT32 ui32InBufferOffset =
+		    PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
+		IMG_UINT32 ui32InBufferExcessSize =
+		    ui32InBufferOffset >=
+		    PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+		bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+		if (bHaveEnoughSpace)
+		{
+			IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncRecordAddIN;
+
+			pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+		}
+		else
+		{
+			/* Input buffer tail too small: fall back to a heap allocation
+			 * (freed at the end of this function). */
+			pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+			if (!pArrayArgsBuffer)
+			{
+				psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+				goto SyncRecordAdd_exit;
+			}
+		}
+	}
+
+	/* Carve the class-name array out of the staging buffer. */
+	if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+	{
+		uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
+		ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+	}
+
+	/* Copy the data over */
+	if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+	{
+		if (OSCopyFromUser
+		    (NULL, uiClassNameInt, (const void __user *)psSyncRecordAddIN->puiClassName,
+		     psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK)
+		{
+			psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+			goto SyncRecordAdd_exit;
+		}
+		/* Force NUL-termination: user space is not trusted to
+		 * terminate the string. */
+		((IMG_CHAR *)
+		 uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] =
+       '\0';
+	}
+
+	/* Lock over handle lookup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Look up the address from the handle */
+	psSyncRecordAddOUT->eError =
+	    PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+				       (void **)&pshServerSyncPrimBlockInt,
+				       hhServerSyncPrimBlock,
+				       PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE);
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordAdd_exit;
+	}
+	/* Release now we have looked up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	psSyncRecordAddOUT->eError =
+	    PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection),
+				  &pshRecordInt,
+				  pshServerSyncPrimBlockInt,
+				  psSyncRecordAddIN->ui32ui32FwBlockAddr,
+				  psSyncRecordAddIN->ui32ui32SyncOffset,
+				  psSyncRecordAddIN->bbServerSync,
+				  psSyncRecordAddIN->ui32ClassNameSize, uiClassNameInt);
+	/* Exit early if bridged call fails */
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Lock over handle creation. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Wrap the record in a handle; its release callback removes the
+	 * record when the handle is destroyed. */
+	psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							       &psSyncRecordAddOUT->hhRecord,
+							       (void *)pshRecordInt,
+							       PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+							       PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							       (PFN_HANDLE_RELEASE) &
+							       _SyncRecordAddpshRecordIntRelease);
+	if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK))
+	{
+		UnlockHandle(psConnection->psHandleBase);
+		goto SyncRecordAdd_exit;
+	}
+
+	/* Release now we have created handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+SyncRecordAdd_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle(psConnection->psHandleBase);
+
+	/* Unreference the previously looked up handle */
+	if (pshServerSyncPrimBlockInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					    hhServerSyncPrimBlock,
+					    PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+	}
+	/* Release now we have cleaned up look up handles. */
+	UnlockHandle(psConnection->psHandleBase);
+
+	/* Error rollback: remove the record if it never made it into a
+	 * handle (otherwise the handle's release callback owns it). */
+	if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+	{
+		if (pshRecordInt)
+		{
+			PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+		}
+	}
+
+	/* Allocated space should be equal to the last updated offset */
+#ifdef PVRSRV_NEED_PVR_ASSERT
+	if (psSyncRecordAddOUT->eError == PVRSRV_OK)
+		PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+#endif /* PVRSRV_NEED_PVR_ASSERT */
+
+	if (!bHaveEnoughSpace && pArrayArgsBuffer)
+		OSFreeMemNoStats(pArrayArgsBuffer);
+
+	return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+void DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+/*
+ * Register all SYNCTRACKING functions with services
+ *
+ * Installs both SYNCTRACKING bridge entry points into the services dispatch
+ * table. Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+			      PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE,
+			      PVRSRVBridgeSyncRecordRemoveByHandle, NULL);
+
+	SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD,
+			      PVRSRVBridgeSyncRecordAdd, NULL);
+
+	return PVRSRV_OK;
+}
+
+/*
+ * Unregister all synctracking functions with services
+ */
+/*
+ * Unregister all synctracking functions with services
+ *
+ * Mirror of InitSYNCTRACKINGBridge: removes both SYNCTRACKING entries from
+ * the dispatch table.
+ */
+void DeinitSYNCTRACKINGBridge(void)
+{
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+				PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE);
+
+	UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING,
+				PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD);
+
+}
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 36.V.52.182 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_52_182_H
+#define RGXCONFIG_KM_36_V_52_182_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 52
+#define RGX_BNC_KM_C 182
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_52_182_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 36.V.54.280 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_36_V_54_280_H
+#define RGXCONFIG_KM_36_V_54_280_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 36
+#define RGX_BNC_KM_N 54
+#define RGX_BNC_KM_C 280
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U)
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_FBCDC (50U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (50U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_IRQ_PER_OS
+#define RGX_FEATURE_LAYOUT_MARS (1U)
+#define RGX_FEATURE_MIPS
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_RASTER_PIPES (1U)
+#define RGX_FEATURE_PBE2_IN_XE
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PHYS_BUS_WIDTH (36U)
+#define RGX_FEATURE_ROGUEXE
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U)
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U)
+#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U)
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TFBC_DELTA_CORRELATION
+#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT
+#define RGX_FEATURE_TFBC_NATIVE_YUV10
+#define RGX_FEATURE_TILE_SIZE_X (16U)
+#define RGX_FEATURE_TILE_SIZE_Y (16U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_XE_ARCHITECTURE (1U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY
+#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U)
+#define RGX_FEATURE_XPU_MAX_SLAVES (3U)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U)
+
+#endif /* RGXCONFIG_KM_36_V_54_280_H */
#ifndef RGXCORE_KM_1_39_4_19_H
#define RGXCORE_KM_1_39_4_19_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @2784771 */
/******************************************************************************
#ifndef RGXCORE_KM_1_75_2_30_H
#define RGXCORE_KM_1_75_2_30_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @2309075 */
/******************************************************************************
#ifndef RGXCORE_KM_1_82_4_5_H
#define RGXCORE_KM_1_82_4_5_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @2503111 */
/******************************************************************************
#ifndef RGXCORE_KM_15_5_1_64_H
#define RGXCORE_KM_15_5_1_64_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @3846532 */
/******************************************************************************
#ifndef RGXCORE_KM_22_102_54_38_H
#define RGXCORE_KM_22_102_54_38_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4831550 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_104_208_318_H
#define RGXCORE_KM_22_104_208_318_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5124208 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65101
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_105_208_318_H
#define RGXCORE_KM_22_105_208_318_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5324554 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65101
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_30_54_25_H
#define RGXCORE_KM_22_30_54_25_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4086500 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_40_54_30_H
#define RGXCORE_KM_22_40_54_30_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4094817 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_46_54_330_H
#define RGXCORE_KM_22_46_54_330_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4136505 */
/******************************************************************************
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65101
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_49_21_16_H
#define RGXCORE_KM_22_49_21_16_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4158766 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_67_54_30_H
#define RGXCORE_KM_22_67_54_30_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4339986 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_68_54_30_H
#define RGXCORE_KM_22_68_54_30_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4339984 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65273
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_86_104_218_H
#define RGXCORE_KM_22_86_104_218_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4665024 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
#define FIX_HW_BRN_65101
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_22_87_104_18_H
#define RGXCORE_KM_22_87_104_18_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4658768 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_64502
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_24_50_208_504_H
#define RGXCORE_KM_24_50_208_504_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5086680 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_24_56_208_505_H
#define RGXCORE_KM_24_56_208_505_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5203837 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_24_66_54_204_H
#define RGXCORE_KM_24_66_54_204_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5200207 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_24_67_104_504_H
#define RGXCORE_KM_24_67_104_504_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5203838 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_29_14_108_208_H
#define RGXCORE_KM_29_14_108_208_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5682134 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_68186
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_29_19_52_202_H
#define RGXCORE_KM_29_19_52_202_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5691248 */
/******************************************************************************
#define FIX_HW_BRN_63553
#define FIX_HW_BRN_68186
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_33_15_11_3_H
#define RGXCORE_KM_33_15_11_3_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5820045 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_33_8_22_1_H
#define RGXCORE_KM_33_8_22_1_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5544349 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 36.29.52.182
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_29_52_182_H
+#define RGXCORE_KM_36_29_52_182_H
+
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
+/* CS: @5908879 */
+
+/******************************************************************************
+ * BVNC = 36.29.52.182
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 29
+#define RGX_BVNC_KM_N 52
+#define RGX_BVNC_KM_C 182
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_29_52_182_H */
#ifndef RGXCORE_KM_36_50_54_182_H
#define RGXCORE_KM_36_50_54_182_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5849603 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_36_52_104_182_H
#define RGXCORE_KM_36_52_104_182_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5849605 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_36_53_104_796_H
#define RGXCORE_KM_36_53_104_796_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5896094 */
/******************************************************************************
* Errata
*****************************************************************************/
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_36_54_54_183_H
#define RGXCORE_KM_36_54_54_183_H
-/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5908021 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_36_55_54_103_H
#define RGXCORE_KM_36_55_54_103_H
-/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5908021 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
#ifndef RGXCORE_KM_36_56_104_183_H
#define RGXCORE_KM_36_56_104_183_H
-/* Automatically generated file (04/10/2021 09:01:51): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @5942195 */
/******************************************************************************
*****************************************************************************/
#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 36.60.54.280
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_36_60_54_280_H
+#define RGXCORE_KM_36_60_54_280_H
+
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
+/* CS: @6040409 */
+
+/******************************************************************************
+ * BVNC = 36.60.54.280
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 36
+#define RGX_BVNC_KM_V 60
+#define RGX_BVNC_KM_N 54
+#define RGX_BVNC_KM_C 280
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_63553
+#define FIX_HW_BRN_71317
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_42290
+#define HW_ERN_42606
+#define HW_ERN_47025
+#define HW_ERN_57596
+
+
+
+#endif /* RGXCORE_KM_36_60_54_280_H */
#ifndef RGXCORE_KM_4_31_4_55_H
#define RGXCORE_KM_4_31_4_55_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @2919104 */
/******************************************************************************
#ifndef RGXCORE_KM_4_40_2_51_H
#define RGXCORE_KM_4_40_2_51_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @3254374 */
/******************************************************************************
#ifndef RGXCORE_KM_4_45_2_58_H
#define RGXCORE_KM_4_45_2_58_H
-/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @3547765 */
/******************************************************************************
#ifndef RGXCORE_KM_4_46_6_62_H
#define RGXCORE_KM_4_46_6_62_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @4015666 */
/******************************************************************************
#ifndef RGXCORE_KM_5_9_1_46_H
#define RGXCORE_KM_5_9_1_46_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @2967148 */
/******************************************************************************
#ifndef RGXCORE_KM_6_34_4_35_H
#define RGXCORE_KM_6_34_4_35_H
-/* Automatically generated file (04/10/2021 09:01:49): Do not edit manually */
+/* Automatically generated file (21/01/2022 09:01:15): Do not edit manually */
/* CS: @3533654 */
/******************************************************************************
#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (51U)
#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0008000000000000))
-#define RGX_FEATURE_WATCHDOG_TIMER_POS (52U)
-#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0010000000000000))
+#define RGX_FEATURE_VOLCANIC_TB_POS (52U)
+#define RGX_FEATURE_VOLCANIC_TB_BIT_MASK (IMG_UINT64_C(0x0010000000000000))
-#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (53U)
-#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0020000000000000))
+#define RGX_FEATURE_WATCHDOG_TIMER_POS (53U)
+#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0020000000000000))
-#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (54U)
-#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0040000000000000))
+#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (54U)
+#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0040000000000000))
-#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (55U)
-#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0080000000000000))
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (55U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0080000000000000))
+
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (56U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0100000000000000))
/******************************************************************************
* for handling the corresponding values
*****************************************************************************/
-#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2)
-#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (3)
#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4)
#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6)
-#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (5)
#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (2)
#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (2)
#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3)
-#define RGX_FEATURE_META_MAX_VALUE_IDX (4)
-#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (1)
-#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (3)
-#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (1)
+#define RGX_FEATURE_META_MAX_VALUE_IDX (5)
+#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (2)
#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5)
#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9)
+#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (2)
#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3)
-#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (4)
#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4)
-#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (1)
-#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (1)
+#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (2)
#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3)
#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4)
#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2)
-#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (6)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7)
#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (3)
#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (3)
#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2)
#define RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX (2)
#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2)
-#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (2)
-#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (3)
/******************************************************************************
* Features with values indexes
RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX,
RGX_FEATURE_NUM_CLUSTERS_IDX,
RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX,
+ RGX_FEATURE_NUM_MEMBUS_IDX,
RGX_FEATURE_NUM_OSIDS_IDX,
RGX_FEATURE_NUM_RASTER_PIPES_IDX,
RGX_FEATURE_PHYS_BUS_WIDTH_IDX,
#define HW_ERN_66622_POS (17U)
#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
-#define FIX_HW_BRN_68186_POS (18U)
-#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+#define FIX_HW_BRN_66927_POS (18U)
+#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define FIX_HW_BRN_68186_POS (19U)
+#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define FIX_HW_BRN_71317_POS (20U)
+#define FIX_HW_BRN_71317_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
/* Macro used for padding the unavailable values for features with values */
#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU)
* for handling the corresponding values
*****************************************************************************/
-static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
-static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, };
+static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 2, };
static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 50, };
static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 50, };
-static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 7, };
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 7, };
static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, };
static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
-static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, };
+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, MTP219, };
-static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
-static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, };
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, 256, };
-static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, };
static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, };
static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, };
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+
static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, };
-static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, };
static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, };
-static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
-static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, };
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, };
-static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, };
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, 512, };
static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, };
static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values[RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 19, };
-static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, };
+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 3, };
-static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, };
+static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
/******************************************************************************
* Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h
*****************************************************************************/
-static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
+static const void * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values,
aui16_RGX_FEATURE_ECC_RAMS_values,
aui16_RGX_FEATURE_FBCDC_values,
aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values,
aui16_RGX_FEATURE_NUM_CLUSTERS_values,
aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values,
+ aui16_RGX_FEATURE_NUM_MEMBUS_values,
aui16_RGX_FEATURE_NUM_OSIDS_values,
aui16_RGX_FEATURE_NUM_RASTER_PIPES_values,
aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values,
RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX,
RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX,
RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX,
RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX,
RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX,
RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX,
RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX,
};
+#define RGX_FEATURE_VALUE_TYPE_UINT16 (0x0000U)
+#define RGX_FEATURE_VALUE_TYPE_UINT32 (0x8000U)
+#define RGX_FEATURE_TYPE_BIT_SHIFT 14
/******************************************************************************
* Bit-positions for features with values
*****************************************************************************/
static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = {
- (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
- (2U), /* RGX_FEATURE_ECC_RAMS_POS */
- (4U), /* RGX_FEATURE_FBCDC_POS */
- (7U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
- (10U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
- (13U), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */
- (15U), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */
- (17U), /* RGX_FEATURE_LAYOUT_MARS_POS */
- (19U), /* RGX_FEATURE_META_POS */
- (22U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */
- (23U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */
- (25U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
- (26U), /* RGX_FEATURE_NUM_CLUSTERS_POS */
- (29U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
- (33U), /* RGX_FEATURE_NUM_OSIDS_POS */
- (35U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */
- (37U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
- (40U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
- (41U), /* RGX_FEATURE_SCALABLE_VCE_POS */
- (42U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */
- (44U), /* RGX_FEATURE_SLC_BANKS_POS */
- (47U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
- (49U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
- (52U), /* RGX_FEATURE_TILE_SIZE_X_POS */
- (54U), /* RGX_FEATURE_TILE_SIZE_Y_POS */
- (56U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
- (58U), /* RGX_FEATURE_XE_ARCHITECTURE_POS */
- (60U), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */
- (62U), /* RGX_FEATURE_XPU_MAX_SLAVES_POS */
- (64U), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */
+ (0U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
+ (2U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_ECC_RAMS_POS */
+ (4U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_POS */
+ (7U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
+ (10U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
+ (13U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */
+ (15U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */
+ (17U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_LAYOUT_MARS_POS */
+ (19U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_POS */
+ (22U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_BANKS_POS */
+ (24U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_SIZE_POS */
+ (27U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
+ (29U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_CLUSTERS_POS */
+ (32U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
+ (36U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_MEMBUS_POS */
+ (38U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_OSIDS_POS */
+ (40U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_RASTER_PIPES_POS */
+ (43U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
+ (46U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
+ (48U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_VCE_POS */
+ (50U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */
+ (52U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_BANKS_POS */
+ (55U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
+ (57U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
+ (60U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_X_POS */
+ (62U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_Y_POS */
+ (64U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
+ (66U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XE_ARCHITECTURE_POS */
+ (68U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */
+ (70U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_MAX_SLAVES_POS */
+ (72U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */
};
(IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */
(IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */
(IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_META_BIT_MASK */
- (IMG_UINT64_C(0x0000000000400000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
- (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
- (IMG_UINT64_C(0x0000000002000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
- (IMG_UINT64_C(0x000000001C000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
- (IMG_UINT64_C(0x00000001E0000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
- (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */
- (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */
- (IMG_UINT64_C(0x000000E000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
- (IMG_UINT64_C(0x0000010000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
- (IMG_UINT64_C(0x0000020000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
- (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */
- (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
- (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
- (IMG_UINT64_C(0x000E000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
- (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */
- (IMG_UINT64_C(0x00C0000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */
- (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
- (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */
- (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */
- (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */
- (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000C00000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000007000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000018000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
+ (IMG_UINT64_C(0x00000000E0000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000F00000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
+ (IMG_UINT64_C(0x0000003000000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */
+ (IMG_UINT64_C(0x000000C000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */
+ (IMG_UINT64_C(0x0000070000000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */
+ (IMG_UINT64_C(0x0000380000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
+ (IMG_UINT64_C(0x0000C00000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
+ (IMG_UINT64_C(0x0003000000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
+ (IMG_UINT64_C(0x000C000000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */
+ (IMG_UINT64_C(0x0070000000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0180000000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
+ (IMG_UINT64_C(0x0E00000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
+ (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */
+ (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
+ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000030)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000000C0)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */
};
static const IMG_UINT64 gaFeatures[][4]=
{
- { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa8068689aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.2.30 */
- { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.5 */
- { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.19 */
- { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068e912a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.51 */
- { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aa806ce912a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.58 */
- { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0082c04000c0222e), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.4.55 */
- { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aab074f112a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.6.62 */
- { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0x05a69068248aa501), IMG_UINT64_C(0x0000000000000000) }, /* 5.0.1.46 */
- { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 6.0.4.35 */
- { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0x05a8906c448aa501), IMG_UINT64_C(0x0000000000000000) }, /* 15.0.1.64 */
- { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0554942c44020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.21.16 */
- { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c64020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.25 */
- { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.30 */
- { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.38 */
- { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.330 */
- { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc4020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.18 */
- { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.218 */
- { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558a4550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.208.318 */
- { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.54.204 */
- { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984ca402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.104.504 */
- { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.504 */
- { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.505 */
- { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x0556984c4402a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.52.202 */
- { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x055aa854e802a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.108.208 */
- { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00400092844b5085), IMG_UINT64_C(0x0552984a24020001), IMG_UINT64_C(0x0000000000000000) }, /* 33.0.11.3 */
- { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x004180c2854b70a5), IMG_UINT64_C(0x0556984c44020001), IMG_UINT64_C(0x0000000000000000) }, /* 33.0.22.1 */
- { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x004180d2844b38a5), IMG_UINT64_C(0x0556984c8402aeb1), IMG_UINT64_C(0x0000000000000000) }, /* 36.0.54.103 */
- { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.182 */
- { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.183 */
- { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.182 */
- { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.183 */
- { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x0071a0d2864a78a5), IMG_UINT64_C(0x5556984ca404aeb5), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.796 */
+ { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0xaa801a03411aa481), IMG_UINT64_C(0x0000000000000001) }, /* 1.0.2.30 */
+ { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0xaa801a03611aa481), IMG_UINT64_C(0x0000000000000001) }, /* 1.0.4.5 */
+ { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0xaa801a03611aa481), IMG_UINT64_C(0x0000000000000001) }, /* 1.0.4.19 */
+ { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0102c04000c0222f), IMG_UINT64_C(0xaa801a074212a901), IMG_UINT64_C(0x0000000000000001) }, /* 4.0.2.51 */
+ { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0102c04000c0322f), IMG_UINT64_C(0xaa801a874212a901), IMG_UINT64_C(0x0000000000000001) }, /* 4.0.2.58 */
+ { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0102c04000c0222e), IMG_UINT64_C(0xaa801a076212a901), IMG_UINT64_C(0x0000000000000001) }, /* 4.0.4.55 */
+ { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0102c04000c0322f), IMG_UINT64_C(0xaab01b878212a901), IMG_UINT64_C(0x0000000000000001) }, /* 4.0.6.62 */
+ { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0xa6901a01210aa501), IMG_UINT64_C(0x0000000000000005) }, /* 5.0.1.46 */
+ { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0102c04000c0222f), IMG_UINT64_C(0xaa801a076212a901), IMG_UINT64_C(0x0000000000000001) }, /* 6.0.4.35 */
+ { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0xa8901a82210aa501), IMG_UINT64_C(0x0000000000000005) }, /* 15.0.1.64 */
+ { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x54940a8220020001), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.21.16 */
+ { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x58940a8320020001), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.54.25 */
+ { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x58940a8420020001), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.54.30 */
+ { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x5894128420020001), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.54.38 */
+ { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x58940a842002a591), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.54.330 */
+ { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x5894128620020001), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.104.18 */
+ { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x589412862002a591), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.104.218 */
+ { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x58a413884002a591), IMG_UINT64_C(0x0000000000000005) }, /* 22.0.208.318 */
+ { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x589812842002a591), IMG_UINT64_C(0x0000000000000005) }, /* 24.0.54.204 */
+ { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x589812852002a591), IMG_UINT64_C(0x0000000000000005) }, /* 24.0.104.504 */
+ { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x5aa813884002a591), IMG_UINT64_C(0x0000000000000005) }, /* 24.0.208.504 */
+ { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x008001c2844f7425), IMG_UINT64_C(0x5aa813884002a591), IMG_UINT64_C(0x0000000000000005) }, /* 24.0.208.505 */
+ { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x008181c2844f74a5), IMG_UINT64_C(0x569812822002a621), IMG_UINT64_C(0x0000000000000005) }, /* 29.0.52.202 */
+ { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x008181c2844f74a5), IMG_UINT64_C(0x5aa813874002a621), IMG_UINT64_C(0x0000000000000005) }, /* 29.0.108.208 */
+ { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00800092844b5085), IMG_UINT64_C(0x5298124120020001), IMG_UINT64_C(0x0000000000000005) }, /* 33.0.11.3 */
+ { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x008180c2854b70a5), IMG_UINT64_C(0x5698128220020001), IMG_UINT64_C(0x0000000000000005) }, /* 33.0.22.1 */
+ { IMG_UINT64_C(0x00240000003400b6), IMG_UINT64_C(0x008000d2844b78a5), IMG_UINT64_C(0x569812822004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.52.182 */
+ { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x008180d2844b38a5), IMG_UINT64_C(0x569812842002b2b1), IMG_UINT64_C(0x0000000000000005) }, /* 36.0.54.103 */
+ { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x569812842004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.54.182 */
+ { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x569812842004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.54.183 */
+ { IMG_UINT64_C(0x0024000000360118), IMG_UINT64_C(0x00819cd2844b78a5), IMG_UINT64_C(0x569812842004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.54.280 */
+ { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x569812852004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.104.182 */
+ { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x008180d2844b78a5), IMG_UINT64_C(0x569812852004b2b1), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.104.183 */
+ { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x00e1a0d2864a78a5), IMG_UINT64_C(0x569812852004b2b9), IMG_UINT64_C(0x0000000000000295) }, /* 36.0.104.796 */
};
/******************************************************************************
{ IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */
{ IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x000000000000108a) }, /* 4.31.4.55 */
{ IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000108a) }, /* 4.40.2.51 */
- { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.43.6.62 */
{ IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000500a) }, /* 4.45.2.58 */
{ IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.46.6.62 */
{ IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000001) }, /* 5.9.1.46 */
{ IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000100a) }, /* 6.34.4.35 */
{ IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000004008) }, /* 15.5.1.64 */
- { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000016b08) }, /* 22.30.54.25 */
- { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000016b08) }, /* 22.40.54.30 */
- { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000001ea0a) }, /* 22.46.54.330 */
- { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000016b08) }, /* 22.49.21.16 */
- { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000016708) }, /* 22.67.54.30 */
- { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000016508) }, /* 22.68.54.30 */
- { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000000e408) }, /* 22.86.104.218 */
- { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000006508) }, /* 22.87.104.18 */
- { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000006508) }, /* 22.102.54.38 */
- { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.104.208.318 */
- { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.105.208.318 */
- { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000002210a) }, /* 24.50.208.504 */
- { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000002210a) }, /* 24.56.208.505 */
- { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000002210a) }, /* 24.66.54.204 */
- { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000002210a) }, /* 24.67.104.504 */
- { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x000000000006212a) }, /* 29.14.108.208 */
- { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x000000000006212a) }, /* 29.19.52.202 */
- { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000000212a) }, /* 33.8.22.1 */
- { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000000212a) }, /* 33.15.11.3 */
- { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000000212a) }, /* 36.50.54.182 */
- { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000000212a) }, /* 36.52.104.182 */
- { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000000012a) }, /* 36.53.104.796 */
- { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000000212a) }, /* 36.54.54.183 */
- { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000000212a) }, /* 36.55.54.103 */
- { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000000212a) }, /* 36.56.104.183 */
+ { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000116b08) }, /* 22.30.54.25 */
+ { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000116b08) }, /* 22.40.54.30 */
+ { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000011ea0a) }, /* 22.46.54.330 */
+ { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000116b08) }, /* 22.49.21.16 */
+ { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000116708) }, /* 22.67.54.30 */
+ { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000116508) }, /* 22.68.54.30 */
+ { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000010e408) }, /* 22.86.104.218 */
+ { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000106508) }, /* 22.87.104.18 */
+ { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000106508) }, /* 22.102.54.38 */
+ { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000010e40a) }, /* 22.104.208.318 */
+ { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000010e40a) }, /* 22.105.208.318 */
+ { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000012210a) }, /* 24.50.208.504 */
+ { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000012210a) }, /* 24.56.208.505 */
+ { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000012210a) }, /* 24.66.54.204 */
+ { IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000012210a) }, /* 24.67.104.504 */
+ { IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x00000000001a212a) }, /* 29.14.108.208 */
+ { IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x00000000001a212a) }, /* 29.19.52.202 */
+ { IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000010212a) }, /* 33.8.22.1 */
+ { IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000010212a) }, /* 33.15.11.3 */
+ { IMG_UINT64_C(0x0024001d003400b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.29.52.182 */
+ { IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.50.54.182 */
+ { IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000010212a) }, /* 36.52.104.182 */
+ { IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000010012a) }, /* 36.53.104.796 */
+ { IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000010212a) }, /* 36.54.54.183 */
+ { IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000010212a) }, /* 36.55.54.103 */
+ { IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000010212a) }, /* 36.56.104.183 */
+ { IMG_UINT64_C(0x0024003c00360118), IMG_UINT64_C(0x000000000010212a) }, /* 36.60.54.280 */
};
#if defined(DEBUG)
-#define FEATURE_NO_VALUES_NAMES_MAX_IDX (56)
+#define FEATURE_NO_VALUES_NAMES_MAX_IDX (57)
static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
{
"TPU_FILTERING_MODE_CONTROL",
"VDM_DRAWINDIRECT",
"VDM_OBJECT_LEVEL_LLS",
+ "VOLCANIC_TB",
"WATCHDOG_TIMER",
"WORKGROUP_PROTECTION",
"XE_MEMORY_HIERARCHY",
"XT_TOP_INFRASTRUCTURE",
};
-#define ERNSBRNS_IDS_MAX_IDX (19)
+#define ERNSBRNS_IDS_MAX_IDX (21)
static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
{
65101,
65273,
66622,
+ 66927,
68186,
+ 71317,
};
#endif /* defined(DEBUG) */
#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U)
+/*
+ Register RGX_CR_EVENT_ENABLE
+*/
+#define RGX_CR_EVENT_ENABLE (0x0128U)
+#define RGX_CR_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF))
+#define RGX_CR_EVENT_ENABLE__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_SHIFT (28U)
+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_DPX_OUT_OF_MEMORY_EN (0x10000000U)
+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_SHIFT (27U)
+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_EVENT_ENABLE_DPX_MMU_PAGE_FAULT_EN (0x08000000U)
+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_SHIFT (26U)
+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RPM_OUT_OF_MEMORY_EN (0x04000000U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_SHIFT (25U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_FBA_FC3_FINISHED_EN (0x02000000U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_SHIFT (24U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_FBA_FC2_FINISHED_EN (0x01000000U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_SHIFT (23U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU)
+#define RGX_CR_EVENT_ENABLE_FBA_FC1_FINISHED_EN (0x00800000U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_SHIFT (22U)
+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU)
+#define RGX_CR_EVENT_ENABLE_FBA_FC0_FINISHED_EN (0x00400000U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_SHIFT (21U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RDM_FC3_FINISHED_EN (0x00200000U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_SHIFT (20U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RDM_FC2_FINISHED_EN (0x00100000U)
+#define RGX_CR_EVENT_ENABLE_SAFETY_SHIFT (20U)
+#define RGX_CR_EVENT_ENABLE_SAFETY_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_EVENT_ENABLE_SAFETY_EN (0x00100000U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_SHIFT (19U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_ENABLE_RDM_FC1_FINISHED_EN (0x00080000U)
+#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_SHIFT (19U)
+#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_ENABLE_SLAVE_REQ_EN (0x00080000U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_ENABLE_RDM_FC0_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_SHIFT (17U)
+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_ENABLE_SHG_FINISHED_EN (0x00020000U)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U)
+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_SHIFT (14U)
+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_EVENT_ENABLE_ZLS_FINISHED_EN (0x00004000U)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U)
+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_SHIFT (8U)
+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_ENABLE_PM_3D_MEM_FREE_EN (0x00000100U)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U)
+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_ENABLE_PIXELBE_END_RENDER_EN (0x00000008U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U)
+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_SHIFT (0U)
+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_EVENT_ENABLE_TLA_COMPLETE_EN (0x00000001U)
+
+
/*
Register RGX_CR_EVENT_STATUS
*/
#endif
#define IMG_EXPLICIT_INCLUDE_HWDEFS
-#if defined(__KERNEL__)
+#if defined(__KERNEL__) || defined(SUPPORT_SERVICES_SC_UNITTESTS_SERVER)
#include "rgx_cr_defs_km.h"
#endif
#undef IMG_EXPLICIT_INCLUDE_HWDEFS
#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION))
#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION))
-#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)(B))) << (B_POSITION) | \
- (((IMG_UINT64)(V))) << (V_POSITION) | \
- (((IMG_UINT64)(N))) << (N_POSITION) | \
- (((IMG_UINT64)(C))) << (C_POSITION) \
+#define BVNC_PACK(B,V,N,C) (((((IMG_UINT64)(B))) << (B_POSITION)) | \
+ ((((IMG_UINT64)(V))) << (V_POSITION)) | \
+ ((((IMG_UINT64)(N))) << (N_POSITION)) | \
+ ((((IMG_UINT64)(C))) << (C_POSITION)) \
)
#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U)
#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU)
#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U)
-#define RGXFW_MAX_NUM_OS (8U)
-#define RGXFW_HOST_OS (0U)
-#define RGXFW_GUEST_OSID_START (1U)
+#if defined(RGX_FEATURE_NUM_OSIDS)
+#define RGXFW_MAX_NUM_OSIDS (RGX_FEATURE_NUM_OSIDS)
+#else
+#define RGXFW_MAX_NUM_OSIDS (8U)
+#endif
+
+#define RGXFW_HOST_DRIVER_ID (0U)
+#define RGXFW_GUEST_DRIVER_ID_START (RGXFW_HOST_DRIVER_ID + 1U)
#define RGXFW_THREAD_0 (0U)
#define RGXFW_THREAD_1 (1U)
#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U)
#define RGX_META_COREMEM (1)
#define RGX_META_COREMEM_CODE (1)
-#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED == 1)
#define RGX_META_COREMEM_DATA (1)
#endif
#else
#define RGXFW_META_SUPPORT_2ND_THREAD
#endif
-
-/*
- * FW MMU contexts
- */
-#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META)
-#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */
-#define MMU_CONTEXT_MAPPING_FWIF (0x7U) /* Host/FW data */
-#else
-#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U)
-#define MMU_CONTEXT_MAPPING_FWIF (0x0U)
-#endif
-
-
/*
* Utility macros to calculate CAT_BASE register addresses
*/
#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK
#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
-
/******************************************************************************
* WA HWBRNs
*****************************************************************************/
*/
#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE)
+/*
+ * The MTS sideband bitfields are renamed to emphasise that the Register Bank
+ * number of the MTS register in use identifies a specific Driver/VM, rather
+ * than the OSID tag emitted on bus memory transactions.
+ */
+#define RGX_MTS_SBDATA_DRIVERID_CLRMSK RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_CLRMSK
+#define RGX_MTS_SBDATA_DRIVERID_SHIFT RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_SHIFT
+
+/*
+ * Register Bank containing registers secured against host access
+ */
+#define RGX_HOST_SECURE_REGBANK_OFFSET (0xF0000U)
+#define RGX_HOST_SECURE_REGBANK_SIZE (0x10000U)
+
/*
* Macro used to indicate which version of HWPerf is active
*/
*/
#define RGX_TRP_MAX_NUM_CORES (4U)
+/*
+ * Maximum number of cores supported by WGP
+ */
+#define RGX_WGP_MAX_NUM_CORES (8U)
+
#endif /* RGXDEFS_KM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 27.V.254.2 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_27_V_254_2_H
+#define RGXCONFIG_KM_27_V_254_2_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 27
+#define RGX_BNC_KM_N 254
+#define RGX_BNC_KM_C 2
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (3U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (512U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_MMU_VERSION (3U)
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (2U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (1U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_27_V_254_2_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 30.V.1632.1 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_30_V_1632_1_H
+#define RGXCONFIG_KM_30_V_1632_1_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 30
+#define RGX_BNC_KM_N 1632
+#define RGX_BNC_KM_C 1
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (512U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (2U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (1U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (2U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (4U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_30_V_1632_1_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 30.V.408.101 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_30_V_408_101_H
+#define RGXCONFIG_KM_30_V_408_101_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 30
+#define RGX_BNC_KM_N 408
+#define RGX_BNC_KM_C 101
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (512U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (1U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (1U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_30_V_408_101_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 30.V.816.20 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_30_V_816_20_H
+#define RGXCONFIG_KM_30_V_816_20_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 30
+#define RGX_BNC_KM_N 816
+#define RGX_BNC_KM_C 20
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (512U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (2U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (1U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (2U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (256U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_30_V_816_20_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.1632.21 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_1632_21_H
+#define RGXCONFIG_KM_35_V_1632_21_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 1632
+#define RGX_BNC_KM_C 21
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (2U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (2U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (4U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_1632_21_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.1632.23 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_1632_23_H
+#define RGXCONFIG_KM_35_V_1632_23_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 1632
+#define RGX_BNC_KM_C 23
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (2U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (2U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (4U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (512U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_1632_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.1632.34 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_1632_34_H
+#define RGXCONFIG_KM_35_V_1632_34_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 1632
+#define RGX_BNC_KM_C 34
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (1U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (4U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (16U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (2U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (2U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RISCV_FW_PROCESSOR
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (2U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (4U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_REGION_PROTECTION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_WATCHDOG_TIMER
+#define RGX_FEATURE_WORKGROUP_PROTECTION
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_1632_34_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.408.101 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_408_101_H
+#define RGXCONFIG_KM_35_V_408_101_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 408
+#define RGX_BNC_KM_C 101
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (512U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (1U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (1U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (16384U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_408_101_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.408.23 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_408_23_H
+#define RGXCONFIG_KM_35_V_408_23_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 408
+#define RGX_BNC_KM_C 23
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (1U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_408_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.408.33 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_408_33_H
+#define RGXCONFIG_KM_35_V_408_33_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 408
+#define RGX_BNC_KM_C 33
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (1U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (1U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RISCV_FW_PROCESSOR
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_REGION_PROTECTION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_WATCHDOG_TIMER
+#define RGX_FEATURE_WORKGROUP_PROTECTION
+#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_408_33_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 35.V.408.34 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_35_V_408_34_H
+#define RGXCONFIG_KM_35_V_408_34_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 35
+#define RGX_BNC_KM_N 408
+#define RGX_BNC_KM_C 34
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (1U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (1U)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (2U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (1U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (4U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (1U)
+#define RGX_FEATURE_NUM_MEMBUS (1U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (1U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RISCV_FW_PROCESSOR
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (1U)
+#define RGX_FEATURE_SCALABLE_VCE (1U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (1U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_REGION_PROTECTION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_WATCHDOG_TIMER
+#define RGX_FEATURE_WORKGROUP_PROTECTION
+#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_35_V_408_34_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 38.V.2448.402 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_38_V_2448_402_H
+#define RGXCONFIG_KM_38_V_2448_402_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 38
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 402
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (4U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (4U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (2U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (3U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (8U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_38_V_2448_402_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 70.V.2448.1041 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_70_V_2448_1041_H
+#define RGXCONFIG_KM_70_V_2448_1041_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 70
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 1041
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (5U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (5U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (3U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (3U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (3U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (0U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (6U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (1536U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_70_V_2448_1041_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 70.V.2448.1360 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_70_V_2448_1360_H
+#define RGXCONFIG_KM_70_V_2448_1360_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 70
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 1360
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (5U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (5U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (3U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (3U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (3U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (8U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_70_V_2448_1360_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 70.V.2448.4119 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_70_V_2448_4119_H
+#define RGXCONFIG_KM_70_V_2448_4119_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 70
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 4119
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (5U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (5U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (3U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (3U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (3U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (8U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (0U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_70_V_2448_4119_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 70.V.2448.417 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_70_V_2448_417_H
+#define RGXCONFIG_KM_70_V_2448_417_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 70
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 417
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (5U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (5U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (1U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (3U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (3U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (8U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_70_V_2448_417_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Configuration for BVNC 70.V.2448.418 (kernel defines)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_70_V_2448_418_H
+#define RGXCONFIG_KM_70_V_2448_418_H
+
+/***** Automatically generated file. Do not edit manually ********************/
+
+/******************************************************************************
+ * B.V.N.C Validation defines
+ *****************************************************************************/
+#define RGX_BNC_KM_B 70
+#define RGX_BNC_KM_N 2448
+#define RGX_BNC_KM_C 418
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_AXI_ACE
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (4U)
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL (4U)
+#define RGX_FEATURE_COREID_PER_OS
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7
+#define RGX_FEATURE_ECC_RAMS (0U)
+#define RGX_FEATURE_FASTRENDER_DM
+#define RGX_FEATURE_FBCDC (5U)
+#define RGX_FEATURE_FBCDC_ALGORITHM (5U)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (6U)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (2048U)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (32U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT
+#define RGX_FEATURE_GPU_VIRTUALISATION
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_HOST_SECURITY_VERSION (2U)
+#define RGX_FEATURE_LAYOUT_MARS (0U)
+#define RGX_FEATURE_MAX_TPU_PER_SPU (2U)
+#define RGX_FEATURE_META (MTP219)
+#define RGX_FEATURE_META_COREMEM_BANKS (8U)
+#define RGX_FEATURE_META_COREMEM_SIZE (96U)
+#define RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT (4U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES
+#define RGX_FEATURE_MMU_VERSION (4U)
+#define RGX_FEATURE_NUM_CLUSTERS (6U)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (24U)
+#define RGX_FEATURE_NUM_ISP_PER_SPU (2U)
+#define RGX_FEATURE_NUM_MEMBUS (4U)
+#define RGX_FEATURE_NUM_OSIDS (8U)
+#define RGX_FEATURE_NUM_SPU (3U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D
+#define RGX_FEATURE_PBE_PER_SPU (1U)
+#define RGX_FEATURE_PBVNC_COREID_REG
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_PDS_TEMPSIZE8
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES
+#define RGX_FEATURE_POWER_ISLAND_VERSION (3U)
+#define RGX_FEATURE_RAY_TRACING_ARCH (3U)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX (32768U)
+#define RGX_FEATURE_RT_RAC_PER_SPU
+#define RGX_FEATURE_S7_CACHE_HIERARCHY
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_SCALABLE_TE_ARCH (2U)
+#define RGX_FEATURE_SCALABLE_VCE (3U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP
+#define RGX_FEATURE_SIGNAL_SNOOPING
+#define RGX_FEATURE_SLC_BANKS (8U)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (1024U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT /* Specifies the SLC is */
+ /* customer-configurable. True SLC */
+ /* size must be sourced from */
+ /* register. */
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (2048U)
+#define RGX_FEATURE_SLC_VIVT
+#define RGX_FEATURE_SOC_TIMER
+#define RGX_FEATURE_SPU0_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU1_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU2_RAC_PRESENT (1U)
+#define RGX_FEATURE_SPU3_RAC_PRESENT (0U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET
+#define RGX_FEATURE_TDM_PDS_CHECKSUM
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS
+#define RGX_FEATURE_TESSELLATION
+#define RGX_FEATURE_TILE_SIZE_X (32U)
+#define RGX_FEATURE_TILE_SIZE_Y (32U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE
+#define RGX_FEATURE_USC_TIMER
+#define RGX_FEATURE_VDM_DRAWINDIRECT
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U)
+#define RGX_FEATURE_ZLS_CHECKSUM
+
+#endif /* RGXCONFIG_KM_70_V_2448_418_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 27.5.254.2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_27_5_254_2_H
+#define RGXCORE_KM_27_5_254_2_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5262350 */
+
+/******************************************************************************
+ * BVNC = 27.5.254.2
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 27
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 254
+#define RGX_BVNC_KM_C 2
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71422
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+
+
+
+#endif /* RGXCORE_KM_27_5_254_2_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 27.7.254.2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_27_7_254_2_H
+#define RGXCORE_KM_27_7_254_2_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5941981 */
+
+/******************************************************************************
+ * BVNC = 27.7.254.2
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 27
+#define RGX_BVNC_KM_V 7
+#define RGX_BVNC_KM_N 254
+#define RGX_BVNC_KM_C 2
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+
+
+
+#endif /* RGXCORE_KM_27_7_254_2_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 27.8.254.2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_27_8_254_2_H
+#define RGXCORE_KM_27_8_254_2_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5975910 */
+
+/******************************************************************************
+ * BVNC = 27.8.254.2
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 27
+#define RGX_BVNC_KM_V 8
+#define RGX_BVNC_KM_N 254
+#define RGX_BVNC_KM_C 2
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+
+
+
+#endif /* RGXCORE_KM_27_8_254_2_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 30.3.408.101
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_30_3_408_101_H
+#define RGXCORE_KM_30_3_408_101_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5653222 */
+
+/******************************************************************************
+ * BVNC = 30.3.408.101
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 30
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 101
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_30_3_408_101_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 30.3.816.20
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_30_3_816_20_H
+#define RGXCORE_KM_30_3_816_20_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5690709 */
+
+/******************************************************************************
+ * BVNC = 30.3.816.20
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 30
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 816
+#define RGX_BVNC_KM_C 20
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_30_3_816_20_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 30.5.1632.1
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_30_5_1632_1_H
+#define RGXCORE_KM_30_5_1632_1_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5621518 */
+
+/******************************************************************************
+ * BVNC = 30.5.1632.1
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 30
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 1
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_30_5_1632_1_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.2.1632.21
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_2_1632_21_H
+#define RGXCORE_KM_35_2_1632_21_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5744712 */
+
+/******************************************************************************
+ * BVNC = 35.2.1632.21
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 21
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71157
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_2_1632_21_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.2.1632.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_2_1632_23_H
+#define RGXCORE_KM_35_2_1632_23_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5928760 */
+
+/******************************************************************************
+ * BVNC = 35.2.1632.23
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71157
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_2_1632_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.2.1632.34
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_2_1632_34_H
+#define RGXCORE_KM_35_2_1632_34_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6097610 */
+
+/******************************************************************************
+ * BVNC = 35.2.1632.34
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 34
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_2_1632_34_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.2.408.33
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_2_408_33_H
+#define RGXCORE_KM_35_2_408_33_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6220991 */
+
+/******************************************************************************
+ * BVNC = 35.2.408.33
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 33
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_2_408_33_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.2.408.34
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_2_408_34_H
+#define RGXCORE_KM_35_2_408_34_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6206572 */
+
+/******************************************************************************
+ * BVNC = 35.2.408.34
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 34
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_2_408_34_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.3.1632.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_3_1632_23_H
+#define RGXCORE_KM_35_3_1632_23_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5906056 */
+
+/******************************************************************************
+ * BVNC = 35.3.1632.23
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_3_1632_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.3.408.101
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_3_408_101_H
+#define RGXCORE_KM_35_3_408_101_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5653222 */
+
+/******************************************************************************
+ * BVNC = 35.3.408.101
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 101
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_3_408_101_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.3.408.33
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_3_408_33_H
+#define RGXCORE_KM_35_3_408_33_H
+
+/* Automatically generated file (12/04/2022 10:11:20): Do not edit manually */
+/* CS: @6222078 */
+
+/******************************************************************************
+ * BVNC = 35.3.408.33
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 33
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_3_408_33_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.4.1632.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_4_1632_23_H
+#define RGXCORE_KM_35_4_1632_23_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5952322 */
+
+/******************************************************************************
+ * BVNC = 35.4.1632.23
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 4
+#define RGX_BVNC_KM_N 1632
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_4_1632_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 35.5.408.23
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_35_5_408_23_H
+#define RGXCORE_KM_35_5_408_23_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6080671 */
+
+/******************************************************************************
+ * BVNC = 35.5.408.23
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 35
+#define RGX_BVNC_KM_V 5
+#define RGX_BVNC_KM_N 408
+#define RGX_BVNC_KM_C 23
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_35_5_408_23_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 38.4.2448.402
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_38_4_2448_402_H
+#define RGXCORE_KM_38_4_2448_402_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @5976185 */
+
+/******************************************************************************
+ * BVNC = 38.4.2448.402
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 38
+#define RGX_BVNC_KM_V 4
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 402
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+#define FIX_HW_BRN_72143
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_38_4_2448_402_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 38.6.2448.402
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_38_6_2448_402_H
+#define RGXCORE_KM_38_6_2448_402_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6068975 */
+
+/******************************************************************************
+ * BVNC = 38.6.2448.402
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 38
+#define RGX_BVNC_KM_V 6
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 402
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_71960
+#define FIX_HW_BRN_72143
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_38_6_2448_402_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.2.2448.1041
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_2_2448_1041_H
+#define RGXCORE_KM_70_2_2448_1041_H
+
+/* Automatically generated file (06/05/2022 11:58:47): Do not edit manually */
+/* CS: @6243633 */
+
+/******************************************************************************
+ * BVNC = 70.2.2448.1041
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 1041
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_2_2448_1041_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.2.2448.1360
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_2_2448_1360_H
+#define RGXCORE_KM_70_2_2448_1360_H
+
+/* Automatically generated file (08/06/2022 13:43:04): Do not edit manually */
+/* CS: @6208659 */
+
+/******************************************************************************
+ * BVNC = 70.2.2448.1360
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 1360
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_2_2448_1360_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.2.2448.4119
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_2_2448_4119_H
+#define RGXCORE_KM_70_2_2448_4119_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6216850 */
+
+/******************************************************************************
+ * BVNC = 70.2.2448.4119
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 4119
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_2_2448_4119_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.2.2448.417
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_2_2448_417_H
+#define RGXCORE_KM_70_2_2448_417_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6207429 */
+
+/******************************************************************************
+ * BVNC = 70.2.2448.417
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 417
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_2_2448_417_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.2.2448.418
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_2_2448_418_H
+#define RGXCORE_KM_70_2_2448_418_H
+
+/* Automatically generated file (23/03/2022 09:02:26): Do not edit manually */
+/* CS: @6208659 */
+
+/******************************************************************************
+ * BVNC = 70.2.2448.418
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 2
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 418
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_2_2448_418_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 70.3.2448.1360
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCORE_KM_70_3_2448_1360_H
+#define RGXCORE_KM_70_3_2448_1360_H
+
+/* Automatically generated file (28/06/2022 10:24:53): Do not edit manually */
+/* CS: @6266740 */
+
+/******************************************************************************
+ * BVNC = 70.3.2448.1360
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 70
+#define RGX_BVNC_KM_V 3
+#define RGX_BVNC_KM_N 2448
+#define RGX_BVNC_KM_C 1360
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_65104
+#define HW_ERN_69700
+
+
+
+#endif /* RGXCORE_KM_70_3_2448_1360_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_defs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ * Auto generated file by rgxbvnc_tablegen.py *
+ * This file should not be edited manually *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_DEFS_KM_H
+#define RGX_BVNC_DEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#if defined(RGX_BVNC_DEFS_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h"
+#endif
+
+#define BVNC_FIELD_WIDTH (16U)
+
+#define PVR_ARCH_NAME "volcanic"
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features without values
+ *****************************************************************************/
+
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_POS (0U)
+#define RGX_FEATURE_ALBIORIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define RGX_FEATURE_AXI_ACE_POS (1U)
+#define RGX_FEATURE_AXI_ACE_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_POS (2U)
+#define RGX_FEATURE_BARREX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_POS (3U)
+#define RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_POS (4U)
+#define RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_POS (5U)
+#define RGX_FEATURE_CATURIX_XTP_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define RGX_FEATURE_CLUSTER_GROUPING_POS (6U)
+#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+#define RGX_FEATURE_COMPUTE_POS (7U)
+#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000080))
+
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (8U)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000100))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_POS (9U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000200))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (10U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000400))
+
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_POS (11U)
+#define RGX_FEATURE_COMPUTE_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000000800))
+
+#define RGX_FEATURE_COREID_PER_OS_POS (12U)
+#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000001000))
+
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (13U)
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000002000))
+
+#define RGX_FEATURE_FASTRENDER_DM_POS (14U)
+#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000004000))
+
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_POS (15U)
+#define RGX_FEATURE_FRAG_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000008000))
+
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_POS (16U)
+#define RGX_FEATURE_GEOMETRY_BIF_ARBITER_BIT_MASK (IMG_UINT64_C(0x0000000000010000))
+
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_POS (17U)
+#define RGX_FEATURE_GEOM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
+
+#define RGX_FEATURE_GPU_CPU_COHERENCY_POS (18U)
+#define RGX_FEATURE_GPU_CPU_COHERENCY_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (19U)
+#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define RGX_FEATURE_GPU_VIRTUALISATION_POS (20U)
+#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
+
+#define RGX_FEATURE_GS_RTA_SUPPORT_POS (21U)
+#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000200000))
+
+#define RGX_FEATURE_HYPERVISOR_MMU_POS (22U)
+#define RGX_FEATURE_HYPERVISOR_MMU_BIT_MASK (IMG_UINT64_C(0x0000000000400000))
+
+#define RGX_FEATURE_META_DMA_POS (23U)
+#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000800000))
+
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_POS (24U)
+#define RGX_FEATURE_META_REGISTER_UNPACKED_ACCESSES_BIT_MASK (IMG_UINT64_C(0x0000000001000000))
+
+#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (25U)
+#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000002000000))
+
+#define RGX_FEATURE_PBVNC_COREID_REG_POS (26U)
+#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000004000000))
+
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (27U)
+#define RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0000000008000000))
+
+#define RGX_FEATURE_PDS_TEMPSIZE8_POS (28U)
+#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000010000000))
+
+#define RGX_FEATURE_PERFBUS_POS (29U)
+#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000020000000))
+
+#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (30U)
+#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000040000000))
+
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_POS (31U)
+#define RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES_BIT_MASK (IMG_UINT64_C(0x0000000080000000))
+
+#define RGX_FEATURE_PM_MMUSTACK_POS (32U)
+#define RGX_FEATURE_PM_MMUSTACK_BIT_MASK (IMG_UINT64_C(0x0000000100000000))
+
+#define RGX_FEATURE_PM_MMU_VFP_POS (33U)
+#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000200000000))
+
+#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (34U)
+#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000400000000))
+
+#define RGX_FEATURE_RT_RAC_PER_SPU_POS (35U)
+#define RGX_FEATURE_RT_RAC_PER_SPU_BIT_MASK (IMG_UINT64_C(0x0000000800000000))
+
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (36U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000001000000000))
+
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (37U)
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000002000000000))
+
+#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (38U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000004000000000))
+
+#define RGX_FEATURE_SIGNAL_SNOOPING_POS (39U)
+#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000008000000000))
+
+#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_POS (40U)
+#define RGX_FEATURE_SLC_FAULT_ACCESS_ADDR_PHYS_BIT_MASK (IMG_UINT64_C(0x0000010000000000))
+
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_POS (41U)
+#define RGX_FEATURE_SLC_SIZE_ADJUSTMENT_BIT_MASK (IMG_UINT64_C(0x0000020000000000))
+
+#define RGX_FEATURE_SLC_VIVT_POS (42U)
+#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000040000000000))
+
+#define RGX_FEATURE_SOC_TIMER_POS (43U)
+#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000080000000000))
+
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (44U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000100000000000))
+
+#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (45U)
+#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000200000000000))
+
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_POS (46U)
+#define RGX_FEATURE_TDM_SLC_MMU_AUTO_CACHE_OPS_BIT_MASK (IMG_UINT64_C(0x0000400000000000))
+
+#define RGX_FEATURE_TESSELLATION_POS (47U)
+#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000800000000000))
+
+#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (48U)
+#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0001000000000000))
+
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (49U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0002000000000000))
+
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (50U)
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0004000000000000))
+
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_POS (51U)
+#define RGX_FEATURE_USC_INSTRUCTION_CACHE_AUTO_INVALIDATE_BIT_MASK (IMG_UINT64_C(0x0008000000000000))
+
+#define RGX_FEATURE_USC_TIMER_POS (52U)
+#define RGX_FEATURE_USC_TIMER_BIT_MASK (IMG_UINT64_C(0x0010000000000000))
+
+#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (53U)
+#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0020000000000000))
+
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (54U)
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0040000000000000))
+
+#define RGX_FEATURE_WATCHDOG_TIMER_POS (55U)
+#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0080000000000000))
+
+#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (56U)
+#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0100000000000000))
+
+#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP_POS (57U)
+#define RGX_FEATURE_WORKGROUP_PROTECTION_SMP_BIT_MASK (IMG_UINT64_C(0x0200000000000000))
+
+#define RGX_FEATURE_ZLS_CHECKSUM_POS (58U)
+#define RGX_FEATURE_ZLS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0400000000000000))
+
+
+/******************************************************************************
+ * Defines for each feature with values used
+ * for handling the corresponding values
+ *****************************************************************************/
+
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_META_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (7)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (7)
+#define RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX (5)
+#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX (5)
+#define RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (4)
+#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (5)
+#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (6)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (7)
+#define RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX (3)
+#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (2)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2)
+
+/******************************************************************************
+ * Features with values indexes
+ *****************************************************************************/
+
+/* Index of each value-carrying feature into the per-device feature-value
+ * storage. Each *_IDX enumerator pairs by name with the corresponding
+ * RGX_FEATURE_*_MAX_VALUE_IDX macro above, which gives the number of
+ * candidate values that feature's lookup table holds.
+ * RGX_FEATURE_WITH_VALUES_MAX_IDX is the enumerator count, not a valid
+ * index. NOTE(review): auto-generated by rgxbvnc_tablegen.py — keep in
+ * sync by regenerating, not by hand-editing. */
+typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ {
+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX,
+ RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_IDX,
+ RGX_FEATURE_ECC_RAMS_IDX,
+ RGX_FEATURE_FBCDC_IDX,
+ RGX_FEATURE_FBCDC_ALGORITHM_IDX,
+ RGX_FEATURE_FBCDC_ARCHITECTURE_IDX,
+ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_IDX,
+ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_IDX,
+ RGX_FEATURE_HOST_SECURITY_VERSION_IDX,
+ RGX_FEATURE_LAYOUT_MARS_IDX,
+ RGX_FEATURE_MAX_TPU_PER_SPU_IDX,
+ RGX_FEATURE_META_IDX,
+ RGX_FEATURE_META_COREMEM_BANKS_IDX,
+ RGX_FEATURE_META_COREMEM_SIZE_IDX,
+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX,
+ RGX_FEATURE_MMU_VERSION_IDX,
+ RGX_FEATURE_NUM_CLUSTERS_IDX,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX,
+ RGX_FEATURE_NUM_ISP_PER_SPU_IDX,
+ RGX_FEATURE_NUM_MEMBUS_IDX,
+ RGX_FEATURE_NUM_OSIDS_IDX,
+ RGX_FEATURE_NUM_SPU_IDX,
+ RGX_FEATURE_PBE_PER_SPU_IDX,
+ RGX_FEATURE_PHYS_BUS_WIDTH_IDX,
+ RGX_FEATURE_POWER_ISLAND_VERSION_IDX,
+ RGX_FEATURE_RAY_TRACING_ARCH_IDX,
+ RGX_FEATURE_RENDER_TARGET_XY_MAX_IDX,
+ RGX_FEATURE_SCALABLE_TE_ARCH_IDX,
+ RGX_FEATURE_SCALABLE_VCE_IDX,
+ RGX_FEATURE_SLC_BANKS_IDX,
+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX,
+ RGX_FEATURE_SPU0_RAC_PRESENT_IDX,
+ RGX_FEATURE_SPU1_RAC_PRESENT_IDX,
+ RGX_FEATURE_SPU2_RAC_PRESENT_IDX,
+ RGX_FEATURE_SPU3_RAC_PRESENT_IDX,
+ RGX_FEATURE_TILE_SIZE_X_IDX,
+ RGX_FEATURE_TILE_SIZE_Y_IDX,
+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX,
+ RGX_FEATURE_WITH_VALUES_MAX_IDX,
+} RGX_FEATURE_WITH_VALUE_INDEX;
+
+
+/******************************************************************************
+ * Mask and bit-position macros for ERNs and BRNs
+ *****************************************************************************/
+
+#define HW_ERN_65104_POS (0U)
+#define HW_ERN_65104_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define FIX_HW_BRN_66927_POS (1U)
+#define FIX_HW_BRN_66927_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define HW_ERN_69700_POS (2U)
+#define HW_ERN_69700_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define FIX_HW_BRN_71157_POS (3U)
+#define FIX_HW_BRN_71157_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define FIX_HW_BRN_71422_POS (4U)
+#define FIX_HW_BRN_71422_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define FIX_HW_BRN_71960_POS (5U)
+#define FIX_HW_BRN_71960_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define FIX_HW_BRN_72143_POS (6U)
+#define FIX_HW_BRN_72143_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+/* Macro used for padding the unavailable values for features with values */
+#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU)
+
+/* Macro used for marking a feature with value as disabled for a specific bvnc */
+#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU)
+
+#endif /* RGX_BVNC_DEFS_KM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_table_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/******************************************************************************
+ * Auto generated file by rgxbvnc_tablegen.py *
+ * This file should not be edited manually *
+ *****************************************************************************/
+
+#ifndef RGX_BVNC_TABLE_KM_H
+#define RGX_BVNC_TABLE_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+
+#ifndef RGXBVNC_C
+#error "This file should only be included from rgxbvnc.c"
+#endif
+
+#if defined(RGX_BVNC_TABLE_UM_H)
+#error "This file should not be included in conjunction with rgx_bvnc_table_um.h"
+#endif
+
+
+/******************************************************************************
+ * Arrays for each feature with values used
+ * for handling the corresponding values
+ *****************************************************************************/
+
+/* Per-feature candidate-value tables. Entry 0 of every table is the
+ * disabled sentinel (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED — i.e.
+ * 0xFFFFFFFFU truncated to 0xFFFF by the cast; the remaining entries are
+ * the values the feature can take across supported BVNCs. Table lengths
+ * come from the RGX_FEATURE_*_MAX_VALUE_IDX macros in rgx_bvnc_defs_km.h.
+ * NOTE(review): auto-generated by rgxbvnc_tablegen.py — do not hand-edit. */
+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 5, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values[RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, 2, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 5, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 5, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 5, 6, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, 2048, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_HOST_SECURITY_VERSION_values[RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values[RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, MTP219, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 96, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_MMU_VERSION_values[RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, 12, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, 8, 16, 24, 32, 48, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values[RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_MEMBUS_values[RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_SPU_values[RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_PBE_PER_SPU_values[RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values[RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_RAY_TRACING_ARCH_values[RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 3, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values[RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16384, 32768, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1024, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, 1024, 1536, 2048, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SPU0_RAC_PRESENT_values[RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SPU1_RAC_PRESENT_values[RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SPU2_RAC_PRESENT_values[RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_SPU3_RAC_PRESENT_values[RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, };
+
+static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, };
+
+
+/******************************************************************************
+ * This table holds a pointer to the value array of each feature that
+ * carries a value.
+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h
+ *****************************************************************************/
+
+static const void * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
+ aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values,
+ aui16_RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_values,
+ aui16_RGX_FEATURE_ECC_RAMS_values,
+ aui16_RGX_FEATURE_FBCDC_values,
+ aui16_RGX_FEATURE_FBCDC_ALGORITHM_values,
+ aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values,
+ aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values,
+ aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values,
+ aui16_RGX_FEATURE_HOST_SECURITY_VERSION_values,
+ aui16_RGX_FEATURE_LAYOUT_MARS_values,
+ aui16_RGX_FEATURE_MAX_TPU_PER_SPU_values,
+ aui16_RGX_FEATURE_META_values,
+ aui16_RGX_FEATURE_META_COREMEM_BANKS_values,
+ aui16_RGX_FEATURE_META_COREMEM_SIZE_values,
+ aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values,
+ aui16_RGX_FEATURE_MMU_VERSION_values,
+ aui16_RGX_FEATURE_NUM_CLUSTERS_values,
+ aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values,
+ aui16_RGX_FEATURE_NUM_ISP_PER_SPU_values,
+ aui16_RGX_FEATURE_NUM_MEMBUS_values,
+ aui16_RGX_FEATURE_NUM_OSIDS_values,
+ aui16_RGX_FEATURE_NUM_SPU_values,
+ aui16_RGX_FEATURE_PBE_PER_SPU_values,
+ aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values,
+ aui16_RGX_FEATURE_POWER_ISLAND_VERSION_values,
+ aui16_RGX_FEATURE_RAY_TRACING_ARCH_values,
+ aui16_RGX_FEATURE_RENDER_TARGET_XY_MAX_values,
+ aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values,
+ aui16_RGX_FEATURE_SCALABLE_VCE_values,
+ aui16_RGX_FEATURE_SLC_BANKS_values,
+ aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values,
+ aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values,
+ aui16_RGX_FEATURE_SPU0_RAC_PRESENT_values,
+ aui16_RGX_FEATURE_SPU1_RAC_PRESENT_values,
+ aui16_RGX_FEATURE_SPU2_RAC_PRESENT_values,
+ aui16_RGX_FEATURE_SPU3_RAC_PRESENT_values,
+ aui16_RGX_FEATURE_TILE_SIZE_X_values,
+ aui16_RGX_FEATURE_TILE_SIZE_Y_values,
+ aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values,
+};
+
+
+/******************************************************************************
+ * Array containing the lengths of the arrays containing the values.
+ * Used for bounds-checking accesses into the aui16_<FEATURE>_values arrays defined above
+ *****************************************************************************/
+
+
+static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = {
+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX,
+ RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_MAX_VALUE_IDX,
+ RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX,
+ RGX_FEATURE_FBCDC_MAX_VALUE_IDX,
+ RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX,
+ RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX,
+ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX,
+ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX,
+ RGX_FEATURE_HOST_SECURITY_VERSION_MAX_VALUE_IDX,
+ RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX,
+ RGX_FEATURE_MAX_TPU_PER_SPU_MAX_VALUE_IDX,
+ RGX_FEATURE_META_MAX_VALUE_IDX,
+ RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX,
+ RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX,
+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX,
+ RGX_FEATURE_MMU_VERSION_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_MEMBUS_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_SPU_MAX_VALUE_IDX,
+ RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX,
+ RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX,
+ RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX,
+ RGX_FEATURE_RAY_TRACING_ARCH_MAX_VALUE_IDX,
+ RGX_FEATURE_RENDER_TARGET_XY_MAX_MAX_VALUE_IDX,
+ RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX,
+ RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX,
+ RGX_FEATURE_SPU0_RAC_PRESENT_MAX_VALUE_IDX,
+ RGX_FEATURE_SPU1_RAC_PRESENT_MAX_VALUE_IDX,
+ RGX_FEATURE_SPU2_RAC_PRESENT_MAX_VALUE_IDX,
+ RGX_FEATURE_SPU3_RAC_PRESENT_MAX_VALUE_IDX,
+ RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX,
+ RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX,
+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX,
+};
+
+#define RGX_FEATURE_VALUE_TYPE_UINT16 (0x0000U)
+#define RGX_FEATURE_VALUE_TYPE_UINT32 (0x8000U)
+#define RGX_FEATURE_TYPE_BIT_SHIFT 14
+
+/******************************************************************************
+ * Bit-positions for features with values
+ *****************************************************************************/
+
+static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = {
+ (0U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
+ (3U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_POS */
+ (5U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_ECC_RAMS_POS */
+ (8U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_POS */
+ (11U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
+ (14U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
+ (16U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */
+ (18U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */
+ (20U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_HOST_SECURITY_VERSION_POS */
+ (23U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_LAYOUT_MARS_POS */
+ (25U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_MAX_TPU_PER_SPU_POS */
+ (28U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_POS */
+ (30U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_BANKS_POS */
+ (32U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_COREMEM_SIZE_POS */
+ (34U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
+ (36U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_MMU_VERSION_POS */
+ (38U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_CLUSTERS_POS */
+ (41U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
+ (44U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_ISP_PER_SPU_POS */
+ (47U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_MEMBUS_POS */
+ (50U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_OSIDS_POS */
+ (52U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_NUM_SPU_POS */
+ (55U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PBE_PER_SPU_POS */
+ (57U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
+ (59U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_POWER_ISLAND_VERSION_POS */
+ (62U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_RAY_TRACING_ARCH_POS */
+ (64U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_RENDER_TARGET_XY_MAX_POS */
+ (66U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
+ (69U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SCALABLE_VCE_POS */
+ (72U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_BANKS_POS */
+ (75U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
+ (77U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
+ (80U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU0_RAC_PRESENT_POS */
+ (82U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU1_RAC_PRESENT_POS */
+ (84U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU2_RAC_PRESENT_POS */
+ (86U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_SPU3_RAC_PRESENT_POS */
+ (88U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_X_POS */
+ (90U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_TILE_SIZE_Y_POS */
+ (92U) | RGX_FEATURE_VALUE_TYPE_UINT16, /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
+};
+
+
+/******************************************************************************
+ * Bit-masks for features with values
+ *****************************************************************************/
+
+static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = {
+ (IMG_UINT64_C(0x0000000000000007)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000018)), /* RGX_FEATURE_CONTEXT_SWITCH_3D_LEVEL_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000000E0)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000700)), /* RGX_FEATURE_FBCDC_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000003800)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */
+ (IMG_UINT64_C(0x000000000000C000)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000030000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000C0000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000700000)), /* RGX_FEATURE_HOST_SECURITY_VERSION_BIT_MASK */
+ (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */
+ (IMG_UINT64_C(0x000000000E000000)), /* RGX_FEATURE_MAX_TPU_PER_SPU_BIT_MASK */
+ (IMG_UINT64_C(0x0000000030000000)), /* RGX_FEATURE_META_BIT_MASK */
+ (IMG_UINT64_C(0x00000000C0000000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000300000000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000C00000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
+ (IMG_UINT64_C(0x0000003000000000)), /* RGX_FEATURE_MMU_VERSION_BIT_MASK */
+ (IMG_UINT64_C(0x000001C000000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
+ (IMG_UINT64_C(0x00000E0000000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
+ (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_NUM_ISP_PER_SPU_BIT_MASK */
+ (IMG_UINT64_C(0x0003800000000000)), /* RGX_FEATURE_NUM_MEMBUS_BIT_MASK */
+ (IMG_UINT64_C(0x000C000000000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */
+ (IMG_UINT64_C(0x0070000000000000)), /* RGX_FEATURE_NUM_SPU_BIT_MASK */
+ (IMG_UINT64_C(0x0180000000000000)), /* RGX_FEATURE_PBE_PER_SPU_BIT_MASK */
+ (IMG_UINT64_C(0x0600000000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
+ (IMG_UINT64_C(0x3800000000000000)), /* RGX_FEATURE_POWER_ISLAND_VERSION_BIT_MASK */
+ (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_RAY_TRACING_ARCH_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_RENDER_TARGET_XY_MAX_BIT_MASK */
+ (IMG_UINT64_C(0x000000000000001C)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000000E0)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000700)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000001800)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
+ (IMG_UINT64_C(0x000000000000E000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000030000)), /* RGX_FEATURE_SPU0_RAC_PRESENT_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000C0000)), /* RGX_FEATURE_SPU1_RAC_PRESENT_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000300000)), /* RGX_FEATURE_SPU2_RAC_PRESENT_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000C00000)), /* RGX_FEATURE_SPU3_RAC_PRESENT_BIT_MASK */
+ (IMG_UINT64_C(0x0000000003000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */
+ (IMG_UINT64_C(0x000000000C000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */
+ (IMG_UINT64_C(0x0000000030000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
+};
+
+
+/******************************************************************************
+ * Per-BVNC table: packed device BVNC followed by feature bitmasks and feature-value bitfields
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaFeatures[][4]=
+{
+ { IMG_UINT64_C(0x001b000000fe0002), IMG_UINT64_C(0x0476bcf0f6b177ca), IMG_UINT64_C(0x4b14a2995295922a), IMG_UINT64_C(0x0000000015552925) }, /* 27.0.254.2 */
+ { IMG_UINT64_C(0x001e000001980065), IMG_UINT64_C(0x047efef0ffb3ffcb), IMG_UINT64_C(0x4a94926952959232), IMG_UINT64_C(0x0000000015552925) }, /* 30.0.408.101 */
+ { IMG_UINT64_C(0x001e000003300014), IMG_UINT64_C(0x047efef0ffb3ffcb), IMG_UINT64_C(0x4a94a4a954959232), IMG_UINT64_C(0x0000000015554a25) }, /* 30.0.816.20 */
+ { IMG_UINT64_C(0x001e000006600001), IMG_UINT64_C(0x047efef0ffb3ffcb), IMG_UINT64_C(0x4aa526e954959232), IMG_UINT64_C(0x0000000015556b49) }, /* 30.0.1632.1 */
+ { IMG_UINT64_C(0x0023000001980017), IMG_UINT64_C(0x047efef0ffbbffcf), IMG_UINT64_C(0x52949269529a9232), IMG_UINT64_C(0x0000000015552926) }, /* 35.0.408.23 */
+ { IMG_UINT64_C(0x0023000001980021), IMG_UINT64_C(0x07fffef4ffbbffcf), IMG_UINT64_C(0x52949265429a9252), IMG_UINT64_C(0x0000000015552926) }, /* 35.0.408.33 */
+ { IMG_UINT64_C(0x0023000001980022), IMG_UINT64_C(0x07fffef4ffbbffcf), IMG_UINT64_C(0x52949265429a9252), IMG_UINT64_C(0x0000000015552926) }, /* 35.0.408.34 */
+ { IMG_UINT64_C(0x0023000001980065), IMG_UINT64_C(0x047efef0ffb3ffcb), IMG_UINT64_C(0x4a94926952959232), IMG_UINT64_C(0x0000000015552925) }, /* 35.0.408.101 */
+ { IMG_UINT64_C(0x0023000006600015), IMG_UINT64_C(0x047efef0ffbbffcf), IMG_UINT64_C(0x52a526e9549a9232), IMG_UINT64_C(0x0000000015556b4a) }, /* 35.0.1632.21 */
+ { IMG_UINT64_C(0x0023000006600017), IMG_UINT64_C(0x047efef0ffbbffcf), IMG_UINT64_C(0x52a526e9549a9232), IMG_UINT64_C(0x0000000015556b4a) }, /* 35.0.1632.23 */
+ { IMG_UINT64_C(0x0023000006600022), IMG_UINT64_C(0x05fffef4ffbbffcf), IMG_UINT64_C(0x52a526e5449a9252), IMG_UINT64_C(0x000000001555cb4a) }, /* 35.0.1632.34 */
+ { IMG_UINT64_C(0x0026000009900192), IMG_UINT64_C(0x047efef8ffbbffdf), IMG_UINT64_C(0x92b62929549a9232), IMG_UINT64_C(0x00000000156acd6a) }, /* 38.0.2448.402 */
+ { IMG_UINT64_C(0x00460000099001a1), IMG_UINT64_C(0x047efef8ffbbffff), IMG_UINT64_C(0x9ab62929549a9b32), IMG_UINT64_C(0x00000000156acd6a) }, /* 70.0.2448.417 */
+ { IMG_UINT64_C(0x00460000099001a2), IMG_UINT64_C(0x047efef8ffbbffff), IMG_UINT64_C(0x9ab6292954aa9b32), IMG_UINT64_C(0x00000000156acd6a) }, /* 70.0.2448.418 */
+ { IMG_UINT64_C(0x0046000009900411), IMG_UINT64_C(0x047efef8ffbbffff), IMG_UINT64_C(0x5ab5a92954ba9b32), IMG_UINT64_C(0x000000001555ac6a) }, /* 70.0.2448.1041 */
+ { IMG_UINT64_C(0x0046000009900550), IMG_UINT64_C(0x047efef8ffbbffff), IMG_UINT64_C(0x9ab6292954ba9b32), IMG_UINT64_C(0x00000000156acd6a) }, /* 70.0.2448.1360 */
+ { IMG_UINT64_C(0x0046000009901017), IMG_UINT64_C(0x047efef8ffbbffff), IMG_UINT64_C(0x9ab6292954ba9b32), IMG_UINT64_C(0x000000001556cd6a) }, /* 70.0.2448.4119 */
+};
+
+/******************************************************************************
+ * Per-BVNC table: packed device BVNC followed by its ERN/BRN (errata/enhancement) bitmask
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaErnsBrns[][2]=
+{
+ { IMG_UINT64_C(0x001b000500fe0002), IMG_UINT64_C(0x0000000000000011) }, /* 27.5.254.2 */
+ { IMG_UINT64_C(0x001b000700fe0002), IMG_UINT64_C(0x0000000000000001) }, /* 27.7.254.2 */
+ { IMG_UINT64_C(0x001b000800fe0002), IMG_UINT64_C(0x0000000000000001) }, /* 27.8.254.2 */
+ { IMG_UINT64_C(0x001e000301980065), IMG_UINT64_C(0x0000000000000025) }, /* 30.3.408.101 */
+ { IMG_UINT64_C(0x001e000303300014), IMG_UINT64_C(0x0000000000000025) }, /* 30.3.816.20 */
+ { IMG_UINT64_C(0x001e000506600001), IMG_UINT64_C(0x0000000000000025) }, /* 30.5.1632.1 */
+ { IMG_UINT64_C(0x0023000201980021), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.408.33 */
+ { IMG_UINT64_C(0x0023000201980022), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.408.34 */
+ { IMG_UINT64_C(0x0023000206600015), IMG_UINT64_C(0x000000000000002d) }, /* 35.2.1632.21 */
+ { IMG_UINT64_C(0x0023000206600017), IMG_UINT64_C(0x000000000000002d) }, /* 35.2.1632.23 */
+ { IMG_UINT64_C(0x0023000206600022), IMG_UINT64_C(0x0000000000000025) }, /* 35.2.1632.34 */
+ { IMG_UINT64_C(0x0023000301980021), IMG_UINT64_C(0x0000000000000025) }, /* 35.3.408.33 */
+ { IMG_UINT64_C(0x0023000301980065), IMG_UINT64_C(0x0000000000000025) }, /* 35.3.408.101 */
+ { IMG_UINT64_C(0x0023000306600017), IMG_UINT64_C(0x0000000000000025) }, /* 35.3.1632.23 */
+ { IMG_UINT64_C(0x0023000406600017), IMG_UINT64_C(0x0000000000000025) }, /* 35.4.1632.23 */
+ { IMG_UINT64_C(0x0023000501980017), IMG_UINT64_C(0x0000000000000025) }, /* 35.5.408.23 */
+ { IMG_UINT64_C(0x0026000409900192), IMG_UINT64_C(0x0000000000000065) }, /* 38.4.2448.402 */
+ { IMG_UINT64_C(0x0026000609900192), IMG_UINT64_C(0x0000000000000065) }, /* 38.6.2448.402 */
+ { IMG_UINT64_C(0x00460002099001a1), IMG_UINT64_C(0x0000000000000005) }, /* 70.2.2448.417 */
+ { IMG_UINT64_C(0x00460002099001a2), IMG_UINT64_C(0x0000000000000005) }, /* 70.2.2448.418 */
+ { IMG_UINT64_C(0x0046000209900411), IMG_UINT64_C(0x0000000000000005) }, /* 70.2.2448.1041 */
+ { IMG_UINT64_C(0x0046000209900550), IMG_UINT64_C(0x0000000000000005) }, /* 70.2.2448.1360 */
+ { IMG_UINT64_C(0x0046000209901017), IMG_UINT64_C(0x0000000000000005) }, /* 70.2.2448.4119 */
+ { IMG_UINT64_C(0x0046000309900550), IMG_UINT64_C(0x0000000000000005) }, /* 70.3.2448.1360 */
+};
+
+#if defined(DEBUG)
+
+#define FEATURE_NO_VALUES_NAMES_MAX_IDX (59)
+
+static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
+{
+ "ALBIORIX_TOP_INFRASTRUCTURE",
+ "AXI_ACE",
+ "BARREX_TOP_INFRASTRUCTURE",
+ "BINDLESS_IMAGE_AND_TEXTURE_STATE",
+ "CATURIX_TOP_INFRASTRUCTURE",
+ "CATURIX_XTP_TOP_INFRASTRUCTURE",
+ "CLUSTER_GROUPING",
+ "COMPUTE",
+ "COMPUTE_MORTON_CAPABLE",
+ "COMPUTE_OVERLAP",
+ "COMPUTE_OVERLAP_WITH_BARRIERS",
+ "COMPUTE_SLC_MMU_AUTO_CACHE_OPS",
+ "COREID_PER_OS",
+ "DUST_POWER_ISLAND_S7",
+ "FASTRENDER_DM",
+ "FRAG_SLC_MMU_AUTO_CACHE_OPS",
+ "GEOMETRY_BIF_ARBITER",
+ "GEOM_SLC_MMU_AUTO_CACHE_OPS",
+ "GPU_CPU_COHERENCY",
+ "GPU_MULTICORE_SUPPORT",
+ "GPU_VIRTUALISATION",
+ "GS_RTA_SUPPORT",
+ "HYPERVISOR_MMU",
+ "META_DMA",
+ "META_REGISTER_UNPACKED_ACCESSES",
+ "PBE_CHECKSUM_2D",
+ "PBVNC_COREID_REG",
+ "PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE",
+ "PDS_TEMPSIZE8",
+ "PERFBUS",
+ "PERF_COUNTER_BATCH",
+ "PM_BYTE_ALIGNED_BASE_ADDRESSES",
+ "PM_MMUSTACK",
+ "PM_MMU_VFP",
+ "RISCV_FW_PROCESSOR",
+ "RT_RAC_PER_SPU",
+ "S7_CACHE_HIERARCHY",
+ "S7_TOP_INFRASTRUCTURE",
+ "SCALABLE_VDM_GPP",
+ "SIGNAL_SNOOPING",
+ "SLC_FAULT_ACCESS_ADDR_PHYS",
+ "SLC_SIZE_ADJUSTMENT",
+ "SLC_VIVT",
+ "SOC_TIMER",
+ "SYS_BUS_SECURE_RESET",
+ "TDM_PDS_CHECKSUM",
+ "TDM_SLC_MMU_AUTO_CACHE_OPS",
+ "TESSELLATION",
+ "TILE_REGION_PROTECTION",
+ "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS",
+ "TPU_DM_GLOBAL_REGISTERS",
+ "USC_INSTRUCTION_CACHE_AUTO_INVALIDATE",
+ "USC_TIMER",
+ "VDM_DRAWINDIRECT",
+ "VDM_OBJECT_LEVEL_LLS",
+ "WATCHDOG_TIMER",
+ "WORKGROUP_PROTECTION",
+ "WORKGROUP_PROTECTION_SMP",
+ "ZLS_CHECKSUM",
+};
+
+#define ERNSBRNS_IDS_MAX_IDX (7)
+
+static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
+{
+ 65104,
+ 66927,
+ 69700,
+ 71157,
+ 71422,
+ 71960,
+ 72143,
+};
+
+#endif /* defined(DEBUG) */
+#endif /* RGX_BVNC_TABLE_KM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_cr_defs_km.h
+@Brief The file contains auto-generated hardware definitions without
+ BVNC-specific compile-time conditionals.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ */
+
+
+#ifndef RGX_CR_DEFS_KM_H
+#define RGX_CR_DEFS_KM_H
+
+#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 171
+
+/*
+ Register RGX_CR_USC_INDIRECT
+*/
+#define RGX_CR_USC_INDIRECT (0x8000U)
+#define RGX_CR_USC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_MERCER_INDIRECT
+*/
+#define RGX_CR_MERCER_INDIRECT (0x8238U)
+#define RGX_CR_MERCER_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MERCER_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_MERCER_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_PBE_SHARED_INDIRECT
+*/
+#define RGX_CR_PBE_SHARED_INDIRECT (0x8388U)
+#define RGX_CR_PBE_SHARED_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_SHARED_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U)
+
+
+/*
+ Register RGX_CR_ISP_INDIRECT
+*/
+#define RGX_CR_ISP_INDIRECT (0x83F8U)
+#define RGX_CR_ISP_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_ISP_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_ISP_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_TPU_INDIRECT
+*/
+#define RGX_CR_TPU_INDIRECT (0x83E8U)
+#define RGX_CR_TPU_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_TPU_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TPU_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_SWIFT_INDIRECT
+*/
+#define RGX_CR_SWIFT_INDIRECT (0x8308U)
+#define RGX_CR_SWIFT_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_SWIFT_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_SWIFT_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_TEXAS_INDIRECT
+*/
+#define RGX_CR_TEXAS_INDIRECT (0x8390U)
+#define RGX_CR_TEXAS_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TEXAS_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TEXAS_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFE0U)
+
+
+/*
+ Register RGX_CR_RAC_INDIRECT
+*/
+#define RGX_CR_RAC_INDIRECT (0x8398U)
+#define RGX_CR_RAC_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_RAC_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_RAC_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_CLK_CTRL0
+*/
+#define RGX_CR_CLK_CTRL0 (0x0000U)
+#define RGX_CR_CLK_CTRL0__PDM_GT0__MASKFULL (IMG_UINT64_C(0xFFCF0303FF3F330F))
+#define RGX_CR_CLK_CTRL0_MASKFULL (IMG_UINT64_C(0xFFCF0303FF3F3303))
+#define RGX_CR_CLK_CTRL0_BIF_TEXAS_SHIFT (62U)
+#define RGX_CR_CLK_CTRL0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL0_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL0_FBCACHE_SHIFT (60U)
+#define RGX_CR_CLK_CTRL0_FBCACHE_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_FBCACHE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_FBCACHE_ON (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL0_FBCACHE_AUTO (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL0_FBC_SHIFT (58U)
+#define RGX_CR_CLK_CTRL0_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_FBC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_FBC_ON (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL0_FBC_AUTO (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL0_FBDC_SHIFT (56U)
+#define RGX_CR_CLK_CTRL0_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_FBDC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_FBDC_ON (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL0_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL0_FBM_SHIFT (54U)
+#define RGX_CR_CLK_CTRL0_FBM_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_FBM_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_FBM_ON (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL0_FBM_AUTO (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL0_PBE_SHIFT (50U)
+#define RGX_CR_CLK_CTRL0_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_PBE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_PBE_ON (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL0_PBE_AUTO (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL0_MCU_L1_SHIFT (48U)
+#define RGX_CR_CLK_CTRL0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL0_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL0_BIF_SHIFT (40U)
+#define RGX_CR_CLK_CTRL0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_BIF_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_BIF_ON (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL0_BIF_AUTO (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL0_TAG_SHIFT (32U)
+#define RGX_CR_CLK_CTRL0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_CTRL0_TAG_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_TAG_ON (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_CTRL0_TAG_AUTO (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_CTRL0_MADD_SHIFT (30U)
+#define RGX_CR_CLK_CTRL0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_CTRL0_MADD_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_MADD_ON (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_CTRL0_MADD_AUTO (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_CTRL0_TF_SHIFT (28U)
+#define RGX_CR_CLK_CTRL0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL0_TF_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_TF_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL0_TF_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL0_MCU_L0_SHIFT (26U)
+#define RGX_CR_CLK_CTRL0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL0_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL0_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL0_TPU_SHIFT (24U)
+#define RGX_CR_CLK_CTRL0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL0_TPU_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_TPU_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL0_TPU_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL0_USC_SHIFT (20U)
+#define RGX_CR_CLK_CTRL0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL0_USC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_USC_ON (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL0_USC_AUTO (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL0_SLC_BANK_SHIFT (18U)
+#define RGX_CR_CLK_CTRL0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL0_SLC_BANK_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_SLC_BANK_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL0_SLC_BANK_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL0_SLC_SHIFT (16U)
+#define RGX_CR_CLK_CTRL0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL0_SLC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_SLC_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL0_SLC_AUTO (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL0_PDS_SHIFT (12U)
+#define RGX_CR_CLK_CTRL0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL0_PDS_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_PDS_ON (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL0_PDS_AUTO (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL0_PM_SHIFT (8U)
+#define RGX_CR_CLK_CTRL0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL0_PM_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_PM_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL0_PM_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL0_ISP_BE_SHIFT (2U)
+#define RGX_CR_CLK_CTRL0_ISP_BE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL0_ISP_BE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_ISP_BE_ON (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL0_ISP_BE_AUTO (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL0_ISP_SHIFT (0U)
+#define RGX_CR_CLK_CTRL0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL0_ISP_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_ISP_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL0_ISP_AUTO (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_CTRL0_ISP_FE_SHIFT (0U)
+#define RGX_CR_CLK_CTRL0_ISP_FE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL0_ISP_FE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL0_ISP_FE_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL0_ISP_FE_AUTO (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+ Register RGX_CR_CLK_STATUS0 - per-unit clock state: each 1-bit field reads GATED (0) or RUNNING (1)
+*/
+#define RGX_CR_CLK_STATUS0 (0x0008U)
+#define RGX_CR_CLK_STATUS0__PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000001BF107F53)) /* differs from MASKFULL only in bit 1 (ISP_BE) */
+#define RGX_CR_CLK_STATUS0_MASKFULL (IMG_UINT64_C(0x00000001BF107F51))
+#define RGX_CR_CLK_STATUS0_MCU_L0_SHIFT (32U)
+#define RGX_CR_CLK_STATUS0_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS0_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_SHIFT (31U)
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE_SHIFT (29U)
+#define RGX_CR_CLK_STATUS0_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBCACHE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBCACHE_RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS0_FBC_SHIFT (28U)
+#define RGX_CR_CLK_STATUS0_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBC_RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS0_FBDC_SHIFT (27U)
+#define RGX_CR_CLK_STATUS0_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS0_FBDC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBDC_RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS0_FBM_SHIFT (26U)
+#define RGX_CR_CLK_STATUS0_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS0_FBM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_FBM_RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS0_PBE_SHIFT (25U)
+#define RGX_CR_CLK_STATUS0_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS0_PBE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1_SHIFT (24U)
+#define RGX_CR_CLK_STATUS0_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS0_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS0_BIF_SHIFT (20U)
+#define RGX_CR_CLK_STATUS0_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS0_BIF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS0_TF_SHIFT (14U)
+#define RGX_CR_CLK_STATUS0_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS0_TF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TF_RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS0_MADD_SHIFT (13U)
+#define RGX_CR_CLK_STATUS0_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS0_MADD_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_MADD_RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS0_TPU_SHIFT (12U)
+#define RGX_CR_CLK_STATUS0_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS0_TPU_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS0_TAG_SHIFT (11U)
+#define RGX_CR_CLK_STATUS0_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_CLK_STATUS0_TAG_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_TAG_RUNNING (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_STATUS0_USC_SHIFT (10U)
+#define RGX_CR_CLK_STATUS0_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS0_USC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_USC_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_SHIFT (9U)
+#define RGX_CR_CLK_STATUS0_SLC_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC_BANK_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS0_SLC_SHIFT (8U)
+#define RGX_CR_CLK_STATUS0_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS0_SLC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS0_PDS_SHIFT (6U)
+#define RGX_CR_CLK_STATUS0_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS0_PDS_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS0_PM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS0_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS0_PM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_PM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS0_ISP_BE_SHIFT (1U)
+#define RGX_CR_CLK_STATUS0_ISP_BE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS0_ISP_BE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_ISP_BE_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS0_ISP_SHIFT (0U)
+#define RGX_CR_CLK_STATUS0_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS0_ISP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_STATUS0_ISP_FE_SHIFT (0U)
+#define RGX_CR_CLK_STATUS0_ISP_FE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS0_ISP_FE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS0_ISP_FE_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_CORE_ID - four 16-bit fields: BRANCH_ID [63:48], VERSION_ID [47:32], NUMBER_OF_SCALABLE_UNITS [31:16], CONFIG_ID [15:0]
+*/
+#define RGX_CR_CORE_ID (0x0020U)
+#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID_BRANCH_ID_SHIFT (48U)
+#define RGX_CR_CORE_ID_BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID_VERSION_ID_SHIFT (32U)
+#define RGX_CR_CORE_ID_VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID_CONFIG_ID_SHIFT (0U)
+#define RGX_CR_CORE_ID_CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_SPU_ENABLE - single 32-bit ENABLE field in bits [31:0] (presumably one bit per SPU - confirm against the hardware TRM)
+*/
+#define RGX_CR_SPU_ENABLE (0x0050U)
+#define RGX_CR_SPU_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SPU_ENABLE_ENABLE_SHIFT (0U)
+#define RGX_CR_SPU_ENABLE_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_SOC_TIMER_GRAY - 64-bit timer VALUE occupying bits [63:0] (Gray-coded per the name - confirm against the hardware TRM)
+*/
+#define RGX_CR_SOC_TIMER_GRAY (0x00E0U)
+#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U)
+#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SOC_TIMER_BINARY - 64-bit timer VALUE occupying bits [63:0]
+*/
+#define RGX_CR_SOC_TIMER_BINARY (0x00E8U)
+#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U)
+#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_CLK_CTRL1 - per-unit clock control: each 2-bit field selects OFF (0), ON (1) or AUTO (2)
+*/
+#define RGX_CR_CLK_CTRL1 (0x0080U)
+#define RGX_CR_CLK_CTRL1_MASKFULL (IMG_UINT64_C(0xFFFC3F3FFFCFFFFF))
+#define RGX_CR_CLK_CTRL1_BSC_SHIFT (62U)
+#define RGX_CR_CLK_CTRL1_BSC_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_BSC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_BSC_ON (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL1_BSC_AUTO (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_SHIFT (60U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_ON (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SAP_AUTO (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_SHIFT (58U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_ON (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_PAP_CMN_AUTO (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_SHIFT (56U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_ON (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TPX_AUTO (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_SHIFT (54U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_ON (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMIPSB_AUTO (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL1_PSB_SHIFT (52U)
+#define RGX_CR_CLK_CTRL1_PSB_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_PSB_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_PSB_ON (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL1_PSB_AUTO (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_SHIFT (50U)
+#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_ON (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL1_TPU_USC_SELECT_AUTO (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL1_CDM_PIPE_SHIFT (44U)
+#define RGX_CR_CLK_CTRL1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_CDM_PIPE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_CDM_PIPE_ON (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL1_CDM_PIPE_AUTO (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_SHIFT (42U)
+#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_ON (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL1_USC_L2ICACHE_AUTO (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL1_TCU_L1_SHIFT (40U)
+#define RGX_CR_CLK_CTRL1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_TCU_L1_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_TCU_L1_ON (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL1_TCU_L1_AUTO (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL1_TDM_SHIFT (36U)
+#define RGX_CR_CLK_CTRL1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_TDM_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_TDM_ON (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_CLK_CTRL1_TDM_AUTO (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_CLK_CTRL1_ASTC_SHIFT (34U)
+#define RGX_CR_CLK_CTRL1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_CTRL1_ASTC_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_ASTC_ON (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_CTRL1_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_CTRL1_IPF_SHIFT (32U)
+#define RGX_CR_CLK_CTRL1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_CTRL1_IPF_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_IPF_ON (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_CTRL1_IPF_AUTO (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_CTRL1_COMPUTE_SHIFT (30U)
+#define RGX_CR_CLK_CTRL1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_CTRL1_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_CTRL1_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_CTRL1_PIXEL_SHIFT (28U)
+#define RGX_CR_CLK_CTRL1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL1_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_PIXEL_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL1_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL1_VERTEX_SHIFT (26U)
+#define RGX_CR_CLK_CTRL1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL1_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_VERTEX_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL1_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL1_TPF_SHIFT (24U)
+#define RGX_CR_CLK_CTRL1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL1_TPF_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_TPF_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL1_TPF_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL1_GEO_VERTEX_SHIFT (22U)
+#define RGX_CR_CLK_CTRL1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_CTRL1_GEO_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_GEO_VERTEX_ON (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_CTRL1_GEO_VERTEX_AUTO (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_CTRL1_GEO_SHARED_SHIFT (18U)
+#define RGX_CR_CLK_CTRL1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL1_GEO_SHARED_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_GEO_SHARED_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL1_GEO_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL1_GEO_TESS_SHIFT (16U)
+#define RGX_CR_CLK_CTRL1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL1_GEO_TESS_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_GEO_TESS_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL1_GEO_TESS_AUTO (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_SHIFT (14U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_ON (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_SMP_AUTO (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_SHIFT (12U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_ON (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_DMA_AUTO (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_SHIFT (10U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_ON (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_TAP_AUTO (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_SHIFT (8U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_AP_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_SHIFT (6U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_ON (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_EMI_AUTO (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_SHIFT (4U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_ON (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_ITR_AUTO (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_SHIFT (2U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_ON (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_CPX_AUTO (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_SHIFT (0U)
+#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_OFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL1_USC_PIPE_MOV_AUTO (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+ Register RGX_CR_CLK_STATUS1 - per-unit clock state for the units in CLK_CTRL1: each 1-bit field reads GATED (0) or RUNNING (1)
+*/
+#define RGX_CR_CLK_STATUS1 (0x0088U)
+#define RGX_CR_CLK_STATUS1_MASKFULL (IMG_UINT64_C(0x00000003FFFE77FB))
+#define RGX_CR_CLK_STATUS1_TILING_SHIFT (33U)
+#define RGX_CR_CLK_STATUS1_TILING_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_CLK_STATUS1_TILING_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_TILING_RUNNING (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_STATUS1_GEO_VDM_SHIFT (32U)
+#define RGX_CR_CLK_STATUS1_GEO_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS1_GEO_VDM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_GEO_VDM_RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_SHIFT (31U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SAP_RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_SHIFT (30U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_PAP_CMN_RUNNING (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_SHIFT (29U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TPX_RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_SHIFT (28U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMIPSB_RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS1_BSC_SHIFT (27U)
+#define RGX_CR_CLK_STATUS1_BSC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS1_BSC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_BSC_RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS1_PSB_SHIFT (26U)
+#define RGX_CR_CLK_STATUS1_PSB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS1_PSB_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_PSB_RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_SHIFT (25U)
+#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_TPU_USC_SELECT_RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_SHIFT (24U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_SMP_RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_SHIFT (23U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_DMA_RUNNING (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_SHIFT (22U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_TAP_RUNNING (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_SHIFT (21U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_AP_RUNNING (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_SHIFT (20U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_EMI_RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_SHIFT (19U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_ITR_RUNNING (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_SHIFT (18U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_CPX_RUNNING (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_SHIFT (17U)
+#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_PIPE_MOV_RUNNING (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_STATUS1_CDM_PIPE_SHIFT (14U)
+#define RGX_CR_CLK_STATUS1_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS1_CDM_PIPE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_CDM_PIPE_RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS1_TCU_L1_SHIFT (13U)
+#define RGX_CR_CLK_STATUS1_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS1_TCU_L1_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_TCU_L1_RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_SHIFT (12U)
+#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_USC_L2ICACHE_RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS1_TDM_SHIFT (10U)
+#define RGX_CR_CLK_STATUS1_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS1_TDM_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS1_IPF_SHIFT (9U)
+#define RGX_CR_CLK_STATUS1_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS1_IPF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS1_COMPUTE_SHIFT (8U)
+#define RGX_CR_CLK_STATUS1_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS1_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS1_ASTC_SHIFT (7U)
+#define RGX_CR_CLK_STATUS1_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS1_ASTC_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS1_PIXEL_SHIFT (6U)
+#define RGX_CR_CLK_STATUS1_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS1_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS1_VERTEX_SHIFT (5U)
+#define RGX_CR_CLK_STATUS1_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS1_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS1_TPF_SHIFT (4U)
+#define RGX_CR_CLK_STATUS1_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS1_TPF_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_TPF_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS1_GEO_SHARED_SHIFT (3U)
+#define RGX_CR_CLK_STATUS1_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS1_GEO_SHARED_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_GEO_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS1_GEO_VERTEX_SHIFT (1U)
+#define RGX_CR_CLK_STATUS1_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS1_GEO_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_GEO_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS1_GEO_TESS_SHIFT (0U)
+#define RGX_CR_CLK_STATUS1_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS1_GEO_TESS_GATED (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS1_GEO_TESS_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET (0x0100U)
+#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x0FFFFFE0007BDEFF))
+#define RGX_CR_SOFT_RESET_TILING_SHIFT (59U)
+#define RGX_CR_SOFT_RESET_TILING_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TILING_EN (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_SOFT_RESET_GEO_VDM_SHIFT (58U)
+#define RGX_CR_SOFT_RESET_GEO_VDM_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GEO_VDM_EN (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_SOFT_RESET_RAC_SHIFT (57U)
+#define RGX_CR_SOFT_RESET_RAC_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_RAC_EN (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_SOFT_RESET_GEO_TESS_SHIFT (56U)
+#define RGX_CR_SOFT_RESET_GEO_TESS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GEO_TESS_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_SOFT_RESET_INT_SHIFT (55U)
+#define RGX_CR_SOFT_RESET_INT_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_INT_EN (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_SOFT_RESET_FP_SHIFT (54U)
+#define RGX_CR_SOFT_RESET_FP_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FP_EN (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_SOFT_RESET_YUV_SHIFT (53U)
+#define RGX_CR_SOFT_RESET_YUV_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_YUV_EN (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_SOFT_RESET_PSB_SHIFT (52U)
+#define RGX_CR_SOFT_RESET_PSB_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PSB_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_SOFT_RESET_ASC_SHIFT (51U)
+#define RGX_CR_SOFT_RESET_ASC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_ASC_EN (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SOFT_RESET_RCE_SHIFT (50U)
+#define RGX_CR_SOFT_RESET_RCE_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_RCE_EN (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SOFT_RESET_BSC_SHIFT (49U)
+#define RGX_CR_SOFT_RESET_BSC_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BSC_EN (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_SHIFT (48U)
+#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TPU_USC_SELECT_EN (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SOFT_RESET_USC_L2ICACHE_SHIFT (47U)
+#define RGX_CR_SOFT_RESET_USC_L2ICACHE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_USC_L2ICACHE_EN (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SOFT_RESET_TCU_L1_SHIFT (46U)
+#define RGX_CR_SOFT_RESET_TCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TCU_L1_EN (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (45U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SOFT_RESET_BIF_JONES_SHIFT (44U)
+#define RGX_CR_SOFT_RESET_BIF_JONES_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_JONES_EN (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT (43U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SOFT_RESET_FBCACHE_SHIFT (42U)
+#define RGX_CR_SOFT_RESET_FBCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBCACHE_EN (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SOFT_RESET_FBM_SHIFT (41U)
+#define RGX_CR_SOFT_RESET_FBM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBM_EN (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT (40U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT (39U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT (38U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (37U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SOFT_RESET_TAG_SHIFT (22U)
+#define RGX_CR_SOFT_RESET_TAG_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_TAG_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SOFT_RESET_TF_SHIFT (21U)
+#define RGX_CR_SOFT_RESET_TF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_TF_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SOFT_RESET_MADD_SHIFT (20U)
+#define RGX_CR_SOFT_RESET_MADD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_MADD_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SOFT_RESET_CDM_PIPE_SHIFT (16U)
+#define RGX_CR_SOFT_RESET_CDM_PIPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_CDM_PIPE_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SOFT_RESET_TDM_SHIFT (15U)
+#define RGX_CR_SOFT_RESET_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_TDM_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SOFT_RESET_ASTC_SHIFT (14U)
+#define RGX_CR_SOFT_RESET_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_ASTC_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SOFT_RESET_TPF_SHIFT (10U)
+#define RGX_CR_SOFT_RESET_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TPF_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SOFT_RESET_IPF_SHIFT (9U)
+#define RGX_CR_SOFT_RESET_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_SOFT_RESET_IPF_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_SOFT_RESET_GEO_SHARED_SHIFT (7U)
+#define RGX_CR_SOFT_RESET_GEO_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_SOFT_RESET_GEO_SHARED_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_SOFT_RESET_GEO_VERTEX_SHIFT (6U)
+#define RGX_CR_SOFT_RESET_GEO_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_SOFT_RESET_GEO_VERTEX_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_SOFT_RESET_PIXEL_SHIFT (5U)
+#define RGX_CR_SOFT_RESET_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SOFT_RESET_PIXEL_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SOFT_RESET_COMPUTE_SHIFT (4U)
+#define RGX_CR_SOFT_RESET_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_COMPUTE_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SOFT_RESET_VERTEX_SHIFT (1U)
+#define RGX_CR_SOFT_RESET_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SOFT_RESET_VERTEX_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SOFT_RESET_USC_SHIFT (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_SOFT_RESET_SPU
+*/
+#define RGX_CR_SOFT_RESET_SPU (0x0108U)
+#define RGX_CR_SOFT_RESET_SPU_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SOFT_RESET_SPU_SPU31_SHIFT (31U)
+#define RGX_CR_SOFT_RESET_SPU_SPU31_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU31_EN (0x80000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU30_SHIFT (30U)
+#define RGX_CR_SOFT_RESET_SPU_SPU30_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU30_EN (0x40000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU29_SHIFT (29U)
+#define RGX_CR_SOFT_RESET_SPU_SPU29_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU29_EN (0x20000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU28_SHIFT (28U)
+#define RGX_CR_SOFT_RESET_SPU_SPU28_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU28_EN (0x10000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU27_SHIFT (27U)
+#define RGX_CR_SOFT_RESET_SPU_SPU27_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU27_EN (0x08000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU26_SHIFT (26U)
+#define RGX_CR_SOFT_RESET_SPU_SPU26_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU26_EN (0x04000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU25_SHIFT (25U)
+#define RGX_CR_SOFT_RESET_SPU_SPU25_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU25_EN (0x02000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU24_SHIFT (24U)
+#define RGX_CR_SOFT_RESET_SPU_SPU24_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU24_EN (0x01000000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU23_SHIFT (23U)
+#define RGX_CR_SOFT_RESET_SPU_SPU23_CLRMSK (0xFF7FFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU23_EN (0x00800000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU22_SHIFT (22U)
+#define RGX_CR_SOFT_RESET_SPU_SPU22_CLRMSK (0xFFBFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU22_EN (0x00400000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU21_SHIFT (21U)
+#define RGX_CR_SOFT_RESET_SPU_SPU21_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU21_EN (0x00200000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU20_SHIFT (20U)
+#define RGX_CR_SOFT_RESET_SPU_SPU20_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU20_EN (0x00100000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU19_SHIFT (19U)
+#define RGX_CR_SOFT_RESET_SPU_SPU19_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU19_EN (0x00080000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU18_SHIFT (18U)
+#define RGX_CR_SOFT_RESET_SPU_SPU18_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU18_EN (0x00040000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU17_SHIFT (17U)
+#define RGX_CR_SOFT_RESET_SPU_SPU17_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU17_EN (0x00020000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU16_SHIFT (16U)
+#define RGX_CR_SOFT_RESET_SPU_SPU16_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU16_EN (0x00010000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU15_SHIFT (15U)
+#define RGX_CR_SOFT_RESET_SPU_SPU15_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU15_EN (0x00008000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU14_SHIFT (14U)
+#define RGX_CR_SOFT_RESET_SPU_SPU14_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU14_EN (0x00004000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU13_SHIFT (13U)
+#define RGX_CR_SOFT_RESET_SPU_SPU13_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU13_EN (0x00002000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU12_SHIFT (12U)
+#define RGX_CR_SOFT_RESET_SPU_SPU12_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU12_EN (0x00001000U)
+#define RGX_CR_SOFT_RESET_SPU_SPU11_SHIFT (11U)
+#define RGX_CR_SOFT_RESET_SPU_SPU11_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU11_EN (0x00000800U)
+#define RGX_CR_SOFT_RESET_SPU_SPU10_SHIFT (10U)
+#define RGX_CR_SOFT_RESET_SPU_SPU10_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU10_EN (0x00000400U)
+#define RGX_CR_SOFT_RESET_SPU_SPU9_SHIFT (9U)
+#define RGX_CR_SOFT_RESET_SPU_SPU9_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU9_EN (0x00000200U)
+#define RGX_CR_SOFT_RESET_SPU_SPU8_SHIFT (8U)
+#define RGX_CR_SOFT_RESET_SPU_SPU8_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU8_EN (0x00000100U)
+#define RGX_CR_SOFT_RESET_SPU_SPU7_SHIFT (7U)
+#define RGX_CR_SOFT_RESET_SPU_SPU7_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_SOFT_RESET_SPU_SPU7_EN (0x00000080U)
+#define RGX_CR_SOFT_RESET_SPU_SPU6_SHIFT (6U)
+#define RGX_CR_SOFT_RESET_SPU_SPU6_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU6_EN (0x00000040U)
+#define RGX_CR_SOFT_RESET_SPU_SPU5_SHIFT (5U)
+#define RGX_CR_SOFT_RESET_SPU_SPU5_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU5_EN (0x00000020U)
+#define RGX_CR_SOFT_RESET_SPU_SPU4_SHIFT (4U)
+#define RGX_CR_SOFT_RESET_SPU_SPU4_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_SOFT_RESET_SPU_SPU4_EN (0x00000010U)
+#define RGX_CR_SOFT_RESET_SPU_SPU3_SHIFT (3U)
+#define RGX_CR_SOFT_RESET_SPU_SPU3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SOFT_RESET_SPU_SPU3_EN (0x00000008U)
+#define RGX_CR_SOFT_RESET_SPU_SPU2_SHIFT (2U)
+#define RGX_CR_SOFT_RESET_SPU_SPU2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SOFT_RESET_SPU_SPU2_EN (0x00000004U)
+#define RGX_CR_SOFT_RESET_SPU_SPU1_SHIFT (1U)
+#define RGX_CR_SOFT_RESET_SPU_SPU1_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_SOFT_RESET_SPU_SPU1_EN (0x00000002U)
+#define RGX_CR_SOFT_RESET_SPU_SPU0_SHIFT (0U)
+#define RGX_CR_SOFT_RESET_SPU_SPU0_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SOFT_RESET_SPU_SPU0_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MULTICORE_EVENT_REDUCE
+*/
+#define RGX_CR_MULTICORE_EVENT_REDUCE (0x2428U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC3FFFFF))
+#define RGX_CR_MULTICORE_EVENT_REDUCE_MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF))
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_SHIFT (31U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_FINISHED_EN (0x80000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_SHIFT (31U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE1_EN (0x80000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BUFFER_STALL_EN (0x40000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_SHIFT (30U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TWO_D_DONE0_EN (0x40000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_SHIFT (29U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE1_EN (0x20000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_SHIFT (28U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_FINISHED_EN (0x10000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_SHIFT (28U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RAY_DONE0_EN (0x10000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_SHIFT (27U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_RCE_BARRIER_HIT_EN (0x08000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_SHIFT (26U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TDM_BARRIER_HIT_EN (0x04000000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE1_SHIFT (21U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE1_EN (0x00200000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE0_SHIFT (20U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_BE_DONE0_EN (0x00100000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_SHIFT (19U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_WDT_TIMEOUT_EN (0x00080000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_SHIFT (18U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_CDM_BARRIER_HIT_EN (0x00040000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_SHIFT (18U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__WDT_TIMEOUT_EN (0x00040000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_BUFFER_STALL_EN (0x00010000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_USC_TRIGGER_EN (0x00008000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_SHIFT (14U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_EN (0x00004000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_ACK_EN (0x00002000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GPIO_REQ_EN (0x00001000U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_ABORT_EN (0x00000800U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_POWER_COMPLETE_EN (0x00000400U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_MMU_PAGE_FAULT_EN (0x00000200U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_SHIFT (8U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_FRAG_DONE_EN (0x00000100U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE1_SHIFT (8U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE1_EN (0x00000100U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_PM_OUT_OF_MEMORY_EN (0x00000080U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE0_SHIFT (7U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FRAG_PM_BE_DONE0_EN (0x00000080U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_TERMINATE_EN (0x00000040U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TA_FINISHED_EN (0x00000020U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_SHIFT (5U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE__PDM_GT0__TA_TERMINATE_EN (0x00000020U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_SHIFT (4U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_IPP_END_RENDER_SENT_EN (0x00000010U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_SHIFT (4U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE1_EN (0x00000010U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_SHIFT (3U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_ISP_END_RENDER_EN (0x00000008U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_SHIFT (3U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_GEOM_DONE0_EN (0x00000008U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_FINISHED_EN (0x00000004U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_SHIFT (2U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE1_EN (0x00000004U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_SHIFT (1U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_TE_END_EN (0x00000002U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_SHIFT (1U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_COMPUTE_DONE0_EN (0x00000002U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_SHIFT (0U)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0x24B0U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MULTICORE_TDM_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON (0x24B8U)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_TDM_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON
+*/
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0x24C0U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_SHIFT (8U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_EXECUTE_COUNT_CLRMSK (0xC00000FFU)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U)
+#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MULTICORE_BROADCAST
+*/
+#define RGX_CR_MULTICORE_BROADCAST (0x24E0U)
+#define RGX_CR_MULTICORE_BROADCAST_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_SHIFT (8U)
+#define RGX_CR_MULTICORE_BROADCAST_PROGRAMMER_MASK_CLRMSK (0xFFFF00FFU)
+#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_SHIFT (0U)
+#define RGX_CR_MULTICORE_BROADCAST_FIRMWARE_MASK_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MULTICORE
+*/
+#define RGX_CR_MULTICORE (0x24E8U)
+#define RGX_CR_MULTICORE_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_SHIFT (27U)
+#define RGX_CR_MULTICORE_DOMAIN_GPU_MASK_CLRMSK (IMG_UINT64_C(0xFFFFFFF807FFFFFF))
+#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_SHIFT (3U)
+#define RGX_CR_MULTICORE_PRIMARY_CORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8000007))
+#define RGX_CR_MULTICORE_ID_SHIFT (0U)
+#define RGX_CR_MULTICORE_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8))
+
+
+/*
+ Register RGX_CR_MULTICORE_SYSTEM
+*/
+#define RGX_CR_MULTICORE_SYSTEM (0x24F0U)
+#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U)
+#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_MULTICORE_DOMAIN
+*/
+#define RGX_CR_MULTICORE_DOMAIN (0x24F8U)
+#define RGX_CR_MULTICORE_DOMAIN_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT (0U)
+#define RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_EVENT_ENABLE
+*/
+#define RGX_CR_EVENT_ENABLE (0x0128U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF))
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC3FFFFF))
+#define RGX_CR_EVENT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_FENCE_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_SHIFT (31U)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE1_EN (0x80000000U)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_BUFFER_STALL_EN (0x40000000U)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_SHIFT (30U)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TWO_D_DONE0_EN (0x40000000U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE1_SHIFT (29U)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE1_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE1_EN (0x20000000U)
+#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_SHIFT (28U)
+#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RCE_FINISHED_EN (0x10000000U)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE0_SHIFT (28U)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE0_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RAY_DONE0_EN (0x10000000U)
+#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_SHIFT (27U)
+#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_EVENT_ENABLE_RCE_BARRIER_HIT_EN (0x08000000U)
+#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_SHIFT (26U)
+#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_BARRIER_HIT_EN (0x04000000U)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE1_SHIFT (21U)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE1_EN (0x00200000U)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE0_SHIFT (20U)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_EVENT_ENABLE_FRAG_BE_DONE0_EN (0x00100000U)
+#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_SHIFT (19U)
+#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_ENABLE_WDT_TIMEOUT_EN (0x00080000U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_ENABLE_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_SHIFT (18U)
+#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_ENABLE_CDM_BARRIER_HIT_EN (0x00040000U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_ENABLE_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_BUFFER_STALL_EN (0x00010000U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_EVENT_ENABLE_USC_TRIGGER_EN (0x00008000U)
+#define RGX_CR_EVENT_ENABLE_FAULT_FW_SHIFT (14U)
+#define RGX_CR_EVENT_ENABLE_FAULT_FW_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_EVENT_ENABLE_FAULT_FW_EN (0x00004000U)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_EVENT_ENABLE_GPIO_ACK_EN (0x00002000U)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_EVENT_ENABLE_GPIO_REQ_EN (0x00001000U)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_EVENT_ENABLE_POWER_ABORT_EN (0x00000800U)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_EVENT_ENABLE_POWER_COMPLETE_EN (0x00000400U)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_EVENT_ENABLE_MMU_PAGE_FAULT_EN (0x00000200U)
+#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_SHIFT (8U)
+#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_ENABLE_PM_FRAG_DONE_EN (0x00000100U)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE1_SHIFT (8U)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE1_EN (0x00000100U)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_ENABLE_PM_OUT_OF_MEMORY_EN (0x00000080U)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE0_SHIFT (7U)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_ENABLE_FRAG_PM_BE_DONE0_EN (0x00000080U)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_ENABLE_TA_TERMINATE_EN (0x00000040U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_ENABLE_TA_FINISHED_EN (0x00000020U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_ENABLE__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_ENABLE_ISP_END_MACROTILE_EN (0x00000010U)
+#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_SHIFT (4U)
+#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_ENABLE_IPP_END_RENDER_SENT_EN (0x00000010U)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_SHIFT (4U)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE1_EN (0x00000010U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_ENABLE_ISP_END_RENDER_EN (0x00000008U)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_SHIFT (3U)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_ENABLE_GEOM_DONE0_EN (0x00000008U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_FINISHED_EN (0x00000004U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_SHIFT (2U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE1_EN (0x00000004U)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_ENABLE_KERNEL_FINISHED_EN (0x00000002U)
+#define RGX_CR_EVENT_ENABLE_TE_END_SHIFT (1U)
+#define RGX_CR_EVENT_ENABLE_TE_END_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_ENABLE_TE_END_EN (0x00000002U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_SHIFT (1U)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_ENABLE_COMPUTE_DONE0_EN (0x00000002U)
+#define RGX_CR_EVENT_ENABLE_FAULT_GPU_SHIFT (0U)
+#define RGX_CR_EVENT_ENABLE_FAULT_GPU_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_EVENT_ENABLE_FAULT_GPU_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS (0x0130U)
+#define RGX_CR_EVENT_STATUS__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF))
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC3FFFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_SHIFT (31U)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE1_EN (0x80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_SHIFT (30U)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TWO_D_DONE0_EN (0x40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
+#define RGX_CR_EVENT_STATUS_RAY_DONE1_SHIFT (29U)
+#define RGX_CR_EVENT_STATUS_RAY_DONE1_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RAY_DONE1_EN (0x20000000U)
+#define RGX_CR_EVENT_STATUS_RCE_FINISHED_SHIFT (28U)
+#define RGX_CR_EVENT_STATUS_RCE_FINISHED_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RCE_FINISHED_EN (0x10000000U)
+#define RGX_CR_EVENT_STATUS_RAY_DONE0_SHIFT (28U)
+#define RGX_CR_EVENT_STATUS_RAY_DONE0_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RAY_DONE0_EN (0x10000000U)
+#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_SHIFT (27U)
+#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_RCE_BARRIER_HIT_EN (0x08000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_SHIFT (26U)
+#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BARRIER_HIT_EN (0x04000000U)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE1_SHIFT (21U)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE1_EN (0x00200000U)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE0_SHIFT (20U)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_FRAG_BE_DONE0_EN (0x00100000U)
+#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_SHIFT (19U)
+#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_WDT_TIMEOUT_EN (0x00080000U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_CDM_BARRIER_HIT_EN (0x00040000U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U)
+#define RGX_CR_EVENT_STATUS_FAULT_FW_SHIFT (14U)
+#define RGX_CR_EVENT_STATUS_FAULT_FW_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_FAULT_FW_EN (0x00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U)
+#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_SHIFT (8U)
+#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_FRAG_DONE_EN (0x00000100U)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE1_SHIFT (8U)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE1_EN (0x00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE0_SHIFT (7U)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_FRAG_PM_BE_DONE0_EN (0x00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U)
+#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_SHIFT (4U)
+#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_IPP_END_RENDER_SENT_EN (0x00000010U)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE1_SHIFT (4U)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE1_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE1_EN (0x00000010U)
+#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_ISP_END_RENDER_EN (0x00000008U)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE0_SHIFT (3U)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE0_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_GEOM_DONE0_EN (0x00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_SHIFT (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE1_EN (0x00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U)
+#define RGX_CR_EVENT_STATUS_TE_END_SHIFT (1U)
+#define RGX_CR_EVENT_STATUS_TE_END_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_TE_END_EN (0x00000002U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_SHIFT (1U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_DONE0_EN (0x00000002U)
+#define RGX_CR_EVENT_STATUS_FAULT_GPU_SHIFT (0U)
+#define RGX_CR_EVENT_STATUS_FAULT_GPU_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_FAULT_GPU_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_EVENT_CLEAR
+*/
+#define RGX_CR_EVENT_CLEAR (0x0138U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_V1__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x00000000F80FFFFF))
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__MASKFULL (IMG_UINT64_C(0x00000000FC3FFFFF))
+#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_FINISHED_EN (0x80000000U)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_SHIFT (31U)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE1_EN (0x80000000U)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_SHIFT (30U)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TWO_D_DONE0_EN (0x40000000U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE1_SHIFT (29U)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE1_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE1_EN (0x20000000U)
+#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_SHIFT (28U)
+#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RCE_FINISHED_EN (0x10000000U)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE0_SHIFT (28U)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE0_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RAY_DONE0_EN (0x10000000U)
+#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_SHIFT (27U)
+#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_EVENT_CLEAR_RCE_BARRIER_HIT_EN (0x08000000U)
+#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_SHIFT (26U)
+#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_BARRIER_HIT_EN (0x04000000U)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE1_SHIFT (21U)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE1_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE1_EN (0x00200000U)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE0_SHIFT (20U)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE0_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_EVENT_CLEAR_FRAG_BE_DONE0_EN (0x00100000U)
+#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_SHIFT (19U)
+#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_CLEAR_WDT_TIMEOUT_EN (0x00080000U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_SHIFT (19U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__IPP_END_RENDER_SENT_EN (0x00080000U)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
+#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_SHIFT (18U)
+#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_CLEAR_CDM_BARRIER_HIT_EN (0x00040000U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_SHIFT (18U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__WDT_TIMEOUT_EN (0x00040000U)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_SHIFT (17U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__CDM_BARRIER_HIT_EN (0x00020000U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_SHIFT (16U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__SPFILTER_SIGNAL_UPDATE_EN (0x00010000U)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U)
+#define RGX_CR_EVENT_CLEAR_FAULT_FW_SHIFT (14U)
+#define RGX_CR_EVENT_CLEAR_FAULT_FW_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_EVENT_CLEAR_FAULT_FW_EN (0x00004000U)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U)
+#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_SHIFT (8U)
+#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_CLEAR_PM_FRAG_DONE_EN (0x00000100U)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE1_SHIFT (8U)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE1_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE1_EN (0x00000100U)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE0_SHIFT (7U)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE0_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_EVENT_CLEAR_FRAG_PM_BE_DONE0_EN (0x00000080U)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_SHIFT (6U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__PM_OUT_OF_MEMORY_EN (0x00000040U)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_SHIFT (5U)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_EVENT_CLEAR__ALRIF_EQ2_AND_PDM_GT0__TA_TERMINATE_EN (0x00000020U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U)
+#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_SHIFT (4U)
+#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_CLEAR_IPP_END_RENDER_SENT_EN (0x00000010U)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_SHIFT (4U)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE1_EN (0x00000010U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_CLEAR_ISP_END_RENDER_EN (0x00000008U)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_SHIFT (3U)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_EVENT_CLEAR_GEOM_DONE0_EN (0x00000008U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_SHIFT (2U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE1_EN (0x00000004U)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U)
+#define RGX_CR_EVENT_CLEAR_TE_END_SHIFT (1U)
+#define RGX_CR_EVENT_CLEAR_TE_END_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_CLEAR_TE_END_EN (0x00000002U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_SHIFT (1U)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_EVENT_CLEAR_COMPUTE_DONE0_EN (0x00000002U)
+#define RGX_CR_EVENT_CLEAR_FAULT_GPU_SHIFT (0U)
+#define RGX_CR_EVENT_CLEAR_FAULT_GPU_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_EVENT_CLEAR_FAULT_GPU_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER (0x0160U)
+#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000))
+
+
+/*
+ Register RGX_CR_FAULT_FW_STATUS
+*/
+#define RGX_CR_FAULT_FW_STATUS (0x0170U)
+#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x000000000007000F))
+#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_SHIFT (18U)
+#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_FAULT_FW_STATUS_META_CORRECT_EN (0x00040000U)
+#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_SHIFT (17U)
+#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_FAULT_FW_STATUS_SLC_CORRECT_EN (0x00020000U)
+#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_SHIFT (16U)
+#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_FAULT_FW_STATUS_MMU_CORRECT_EN (0x00010000U)
+#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_SHIFT (3U)
+#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_FAULT_FW_STATUS_LOCKSTEP_DETECT_EN (0x00000008U)
+#define RGX_CR_FAULT_FW_STATUS_META_DETECT_SHIFT (2U)
+#define RGX_CR_FAULT_FW_STATUS_META_DETECT_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_FAULT_FW_STATUS_META_DETECT_EN (0x00000004U)
+#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_SHIFT (1U)
+#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_FAULT_FW_STATUS_SLC_DETECT_EN (0x00000002U)
+#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_SHIFT (0U)
+#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FAULT_FW_STATUS_MMU_DETECT_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FAULT_FW_CLEAR
+*/
+#define RGX_CR_FAULT_FW_CLEAR (0x0178U)
+#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x000000000007000F))
+#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_SHIFT (18U)
+#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_FAULT_FW_CLEAR_META_CORRECT_EN (0x00040000U)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_SHIFT (17U)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_CORRECT_EN (0x00020000U)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_SHIFT (16U)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_CORRECT_EN (0x00010000U)
+#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_SHIFT (3U)
+#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_FAULT_FW_CLEAR_LOCKSTEP_DETECT_EN (0x00000008U)
+#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_SHIFT (2U)
+#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_FAULT_FW_CLEAR_META_DETECT_EN (0x00000004U)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_SHIFT (1U)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_FAULT_FW_CLEAR_SLC_DETECT_EN (0x00000002U)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_SHIFT (0U)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FAULT_FW_CLEAR_MMU_DETECT_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_JONES_RAM_STATUS
+*/
+#define RGX_CR_JONES_RAM_STATUS (0x1148U)
+#define RGX_CR_JONES_RAM_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_JONES_RAM_STATUS_GARTEN_SHIFT (8U)
+#define RGX_CR_JONES_RAM_STATUS_GARTEN_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_JONES_RAM_STATUS_GARTEN_EN (0x00000100U)
+#define RGX_CR_JONES_RAM_STATUS_TDM_SHIFT (7U)
+#define RGX_CR_JONES_RAM_STATUS_TDM_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_JONES_RAM_STATUS_TDM_EN (0x00000080U)
+#define RGX_CR_JONES_RAM_STATUS_VERTEX_SHIFT (6U)
+#define RGX_CR_JONES_RAM_STATUS_VERTEX_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_JONES_RAM_STATUS_VERTEX_EN (0x00000040U)
+#define RGX_CR_JONES_RAM_STATUS_PIXEL_SHIFT (5U)
+#define RGX_CR_JONES_RAM_STATUS_PIXEL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_JONES_RAM_STATUS_PIXEL_EN (0x00000020U)
+#define RGX_CR_JONES_RAM_STATUS_COMPUTE_SHIFT (4U)
+#define RGX_CR_JONES_RAM_STATUS_COMPUTE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_JONES_RAM_STATUS_COMPUTE_EN (0x00000010U)
+#define RGX_CR_JONES_RAM_STATUS_FBCDC_SHIFT (3U)
+#define RGX_CR_JONES_RAM_STATUS_FBCDC_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_JONES_RAM_STATUS_FBCDC_EN (0x00000008U)
+#define RGX_CR_JONES_RAM_STATUS_PM_SHIFT (2U)
+#define RGX_CR_JONES_RAM_STATUS_PM_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_JONES_RAM_STATUS_PM_EN (0x00000004U)
+#define RGX_CR_JONES_RAM_STATUS_BIF_SHIFT (1U)
+#define RGX_CR_JONES_RAM_STATUS_BIF_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_JONES_RAM_STATUS_BIF_EN (0x00000002U)
+#define RGX_CR_JONES_RAM_STATUS_SLC_SHIFT (0U)
+#define RGX_CR_JONES_RAM_STATUS_SLC_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_JONES_RAM_STATUS_SLC_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_JONES_RAM_INIT_KICK
+*/
+#define RGX_CR_JONES_RAM_INIT_KICK (0x1158U)
+#define RGX_CR_JONES_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_SHIFT (8U)
+#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_JONES_RAM_INIT_KICK_GARTEN_EN (0x00000100U)
+#define RGX_CR_JONES_RAM_INIT_KICK_TDM_SHIFT (7U)
+#define RGX_CR_JONES_RAM_INIT_KICK_TDM_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_JONES_RAM_INIT_KICK_TDM_EN (0x00000080U)
+#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_SHIFT (6U)
+#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_JONES_RAM_INIT_KICK_VERTEX_EN (0x00000040U)
+#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_SHIFT (5U)
+#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_JONES_RAM_INIT_KICK_PIXEL_EN (0x00000020U)
+#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_SHIFT (4U)
+#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_JONES_RAM_INIT_KICK_COMPUTE_EN (0x00000010U)
+#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_SHIFT (3U)
+#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_JONES_RAM_INIT_KICK_FBCDC_EN (0x00000008U)
+#define RGX_CR_JONES_RAM_INIT_KICK_PM_SHIFT (2U)
+#define RGX_CR_JONES_RAM_INIT_KICK_PM_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_JONES_RAM_INIT_KICK_PM_EN (0x00000004U)
+#define RGX_CR_JONES_RAM_INIT_KICK_BIF_SHIFT (1U)
+#define RGX_CR_JONES_RAM_INIT_KICK_BIF_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_JONES_RAM_INIT_KICK_BIF_EN (0x00000002U)
+#define RGX_CR_JONES_RAM_INIT_KICK_SLC_SHIFT (0U)
+#define RGX_CR_JONES_RAM_INIT_KICK_SLC_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_JONES_RAM_INIT_KICK_SLC_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (32U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (31U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (23U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF))
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (6U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F))
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0))
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSHIFT (2U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (32U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (31U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (23U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (6U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSHIFT (2U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x00000001BFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (32U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (31U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (23U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC07FFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_TYPE_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC007FF))
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSHIFT (6U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_ALLOC_SIZE_ALIGNSIZE (64U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (6U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F))
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSHIFT (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_ALIGNSIZE (2U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC0))
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSHIFT (2U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_ALIGNSIZE (4U)
+
+
+/*
+ Register group: RGX_CR_SCRATCH, with 16 repeats
+*/
+#define RGX_CR_SCRATCH_REPEATCOUNT (16U)
+/*
+ Register RGX_CR_SCRATCH0
+*/
+#define RGX_CR_SCRATCH0 (0x0800U)
+#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH0_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH1
+*/
+#define RGX_CR_SCRATCH1 (0x0808U)
+#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH1_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH2
+*/
+#define RGX_CR_SCRATCH2 (0x0810U)
+#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH2_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH3
+*/
+#define RGX_CR_SCRATCH3 (0x0818U)
+#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH3_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH4
+*/
+#define RGX_CR_SCRATCH4 (0x0820U)
+#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH4_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH4_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH5
+*/
+#define RGX_CR_SCRATCH5 (0x0828U)
+#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH5_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH5_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH6
+*/
+#define RGX_CR_SCRATCH6 (0x0830U)
+#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH6_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH6_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH7
+*/
+#define RGX_CR_SCRATCH7 (0x0838U)
+#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH7_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH7_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH8
+*/
+#define RGX_CR_SCRATCH8 (0x0840U)
+#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH8_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH8_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH9
+*/
+#define RGX_CR_SCRATCH9 (0x0848U)
+#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH9_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH9_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH10
+*/
+#define RGX_CR_SCRATCH10 (0x0850U)
+#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH10_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH10_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH11
+*/
+#define RGX_CR_SCRATCH11 (0x0858U)
+#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH11_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH11_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH12
+*/
+#define RGX_CR_SCRATCH12 (0x0860U)
+#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH12_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH12_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH13
+*/
+#define RGX_CR_SCRATCH13 (0x0868U)
+#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH13_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH13_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH14
+*/
+#define RGX_CR_SCRATCH14 (0x0870U)
+#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH14_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH14_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SCRATCH15
+*/
+#define RGX_CR_SCRATCH15 (0x0878U)
+#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SCRATCH15_DATA_SHIFT (0U)
+#define RGX_CR_SCRATCH15_DATA_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register group: RGX_CR_OS0_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS0_SCRATCH0
+*/
+#define RGX_CR_OS0_SCRATCH0 (0x0880U)
+#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS0_SCRATCH1
+*/
+#define RGX_CR_OS0_SCRATCH1 (0x0888U)
+#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS0_SCRATCH2
+*/
+#define RGX_CR_OS0_SCRATCH2 (0x0890U)
+#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS0_SCRATCH3
+*/
+#define RGX_CR_OS0_SCRATCH3 (0x0898U)
+#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS1_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS1_SCRATCH0
+*/
+#define RGX_CR_OS1_SCRATCH0 (0x10880U)
+#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS1_SCRATCH1
+*/
+#define RGX_CR_OS1_SCRATCH1 (0x10888U)
+#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS1_SCRATCH2
+*/
+#define RGX_CR_OS1_SCRATCH2 (0x10890U)
+#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS1_SCRATCH3
+*/
+#define RGX_CR_OS1_SCRATCH3 (0x10898U)
+#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS2_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS2_SCRATCH0
+*/
+#define RGX_CR_OS2_SCRATCH0 (0x20880U)
+#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS2_SCRATCH1
+*/
+#define RGX_CR_OS2_SCRATCH1 (0x20888U)
+#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS2_SCRATCH2
+*/
+#define RGX_CR_OS2_SCRATCH2 (0x20890U)
+#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS2_SCRATCH3
+*/
+#define RGX_CR_OS2_SCRATCH3 (0x20898U)
+#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS3_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS3_SCRATCH0
+*/
+#define RGX_CR_OS3_SCRATCH0 (0x30880U)
+#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS3_SCRATCH1
+*/
+#define RGX_CR_OS3_SCRATCH1 (0x30888U)
+#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS3_SCRATCH2
+*/
+#define RGX_CR_OS3_SCRATCH2 (0x30890U)
+#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS3_SCRATCH3
+*/
+#define RGX_CR_OS3_SCRATCH3 (0x30898U)
+#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS4_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS4_SCRATCH0
+*/
+#define RGX_CR_OS4_SCRATCH0 (0x40880U)
+#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS4_SCRATCH1
+*/
+#define RGX_CR_OS4_SCRATCH1 (0x40888U)
+#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS4_SCRATCH2
+*/
+#define RGX_CR_OS4_SCRATCH2 (0x40890U)
+#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS4_SCRATCH3
+*/
+#define RGX_CR_OS4_SCRATCH3 (0x40898U)
+#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS5_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS5_SCRATCH0
+*/
+#define RGX_CR_OS5_SCRATCH0 (0x50880U)
+#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS5_SCRATCH1
+*/
+#define RGX_CR_OS5_SCRATCH1 (0x50888U)
+#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS5_SCRATCH2
+*/
+#define RGX_CR_OS5_SCRATCH2 (0x50890U)
+#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS5_SCRATCH3
+*/
+#define RGX_CR_OS5_SCRATCH3 (0x50898U)
+#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS6_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS6_SCRATCH0
+*/
+#define RGX_CR_OS6_SCRATCH0 (0x60880U)
+#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS6_SCRATCH1
+*/
+#define RGX_CR_OS6_SCRATCH1 (0x60888U)
+#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS6_SCRATCH2
+*/
+#define RGX_CR_OS6_SCRATCH2 (0x60890U)
+#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS6_SCRATCH3
+*/
+#define RGX_CR_OS6_SCRATCH3 (0x60898U)
+#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_OS7_SCRATCH, with 4 repeats
+*/
+#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (4U)
+/*
+ Register RGX_CR_OS7_SCRATCH0
+*/
+#define RGX_CR_OS7_SCRATCH0 (0x70880U)
+#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U)
+#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS7_SCRATCH1
+*/
+#define RGX_CR_OS7_SCRATCH1 (0x70888U)
+#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U)
+#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS7_SCRATCH2
+*/
+#define RGX_CR_OS7_SCRATCH2 (0x70890U)
+#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U)
+#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_OS7_SCRATCH3
+*/
+#define RGX_CR_OS7_SCRATCH3 (0x70898U)
+#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U)
+#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3000U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A00U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3000U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAX_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3040U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A08U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3040U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVDATAT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3080U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__RD_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3080U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ADDR_CLRMSK (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__AUTOINCR_EN (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF30C0U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__DEFERR_EN (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__READY_EN (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__DEFERR_EN (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x30C0U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRTHREAD_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__LOCK2_INTERLOCK_EN (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__ATOMIC_INTERLOCK_EN (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__COREMEM_IDLE_EN (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERRID_CLRMSK (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__DEFERR_EN (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__WR_ACTIVE_EN (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__THREAD_CLRMSK (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRANS_SIZE_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__BYTE_ROUND_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3280U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3280U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__INPUT_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__OUTPUT_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3400U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3400U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3440U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3440U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT0KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3480U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3480U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF34C0U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x34C0U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT1KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3500U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3500U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3540U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3540U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT2KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3580U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3580U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF35C0U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x35C0U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MSLVT3KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3600U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AC0U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3600U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST__HOST_SECURITY_V1_AND_METAREG_UNPACKED__SOFTRESET_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3640U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3640U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT3_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED__TRIGVECT2_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF3680U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT1_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__EVENT0_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT1_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__EVENT0_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x3680U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT1_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED__EVENT0_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED (0xF36C0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__MODE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED (0xF0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_NOT_METAREG_UNPACKED__MODE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED (0x36C0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED__MODE_EN (0x00000001U)
+
+
<imports>
</imports>
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL - base variant; single MODE bit (HOST_SECURITY/METAREG_UNPACKED variants of this register are defined separately above)
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U)
+
+
+/*
+ Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 32 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (32U)
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED00
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED00 (0x8FC0U)
+#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED01
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED01 (0x8FC8U)
+#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED02
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED02 (0x8FD0U)
+#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED03
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED03 (0x8FD8U)
+#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_DATA0
+*/
+#define RGX_CR_FWCORE_DMI_DATA0 (0x90C0U)
+#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 7 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (7U)
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED10
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED10 (0x90C8U)
+#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED11
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED11 (0x90D0U)
+#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED12
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED12 (0x90D8U)
+#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED13
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED13 (0x90E0U)
+#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED14
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED14 (0x90E8U)
+#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED15
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED15 (0x90F0U)
+#define RGX_CR_FWCORE_DMI_RESERVED15_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED16
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED16 (0x90F8U)
+#define RGX_CR_FWCORE_DMI_RESERVED16_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_DATA1
+*/
+#define RGX_CR_FWCORE_DMI_DATA1 (0x9100U)
+#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 7 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (7U)
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED20
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED20 (0x9108U)
+#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED21
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED21 (0x9110U)
+#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED22
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED22 (0x9118U)
+#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED23
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED23 (0x9120U)
+#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 80 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (80U)
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED30
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED30 (0x9140U)
+#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED31
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED31 (0x9148U)
+#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED32
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED32 (0x9150U)
+#define RGX_CR_FWCORE_DMI_RESERVED32_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED33
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED33 (0x9158U)
+#define RGX_CR_FWCORE_DMI_RESERVED33_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED34
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED34 (0x9160U)
+#define RGX_CR_FWCORE_DMI_RESERVED34_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED35
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED35 (0x9168U)
+#define RGX_CR_FWCORE_DMI_RESERVED35_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED36
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED36 (0x9170U)
+#define RGX_CR_FWCORE_DMI_RESERVED36_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED37
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED37 (0x9178U)
+#define RGX_CR_FWCORE_DMI_RESERVED37_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED38
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED38 (0x9180U)
+#define RGX_CR_FWCORE_DMI_RESERVED38_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED39
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED39 (0x9188U)
+#define RGX_CR_FWCORE_DMI_RESERVED39_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED310
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED310 (0x9190U)
+#define RGX_CR_FWCORE_DMI_RESERVED310_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED311
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED311 (0x9198U)
+#define RGX_CR_FWCORE_DMI_RESERVED311_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED312
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED312 (0x91A0U)
+#define RGX_CR_FWCORE_DMI_RESERVED312_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED313
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED313 (0x91A8U)
+#define RGX_CR_FWCORE_DMI_RESERVED313_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED314
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED314 (0x91B0U)
+#define RGX_CR_FWCORE_DMI_RESERVED314_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED315
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED315 (0x91B8U)
+#define RGX_CR_FWCORE_DMI_RESERVED315_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED316
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED316 (0x91C0U)
+#define RGX_CR_FWCORE_DMI_RESERVED316_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED317
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED317 (0x91C8U)
+#define RGX_CR_FWCORE_DMI_RESERVED317_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED318
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED318 (0x91D0U)
+#define RGX_CR_FWCORE_DMI_RESERVED318_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED319
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED319 (0x91D8U)
+#define RGX_CR_FWCORE_DMI_RESERVED319_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED320
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED320 (0x91E0U)
+#define RGX_CR_FWCORE_DMI_RESERVED320_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED321
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED321 (0x91E8U)
+#define RGX_CR_FWCORE_DMI_RESERVED321_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED322
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED322 (0x91F0U)
+#define RGX_CR_FWCORE_DMI_RESERVED322_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED323
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED323 (0x91F8U)
+#define RGX_CR_FWCORE_DMI_RESERVED323_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED324
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED324 (0x9200U)
+#define RGX_CR_FWCORE_DMI_RESERVED324_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED325
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED325 (0x9208U)
+#define RGX_CR_FWCORE_DMI_RESERVED325_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED326
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED326 (0x9210U)
+#define RGX_CR_FWCORE_DMI_RESERVED326_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED327
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED327 (0x9218U)
+#define RGX_CR_FWCORE_DMI_RESERVED327_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED328
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED328 (0x9220U)
+#define RGX_CR_FWCORE_DMI_RESERVED328_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED329
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED329 (0x9228U)
+#define RGX_CR_FWCORE_DMI_RESERVED329_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED330
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED330 (0x9230U)
+#define RGX_CR_FWCORE_DMI_RESERVED330_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED331
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED331 (0x9238U)
+#define RGX_CR_FWCORE_DMI_RESERVED331_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_DMCONTROL - FW core Debug Module control; field names/positions track the RISC-V Debug Spec "dmcontrol" register (NOTE(review): looks like spec v0.13 - confirm)
+*/
+#define RGX_CR_FWCORE_DMI_DMCONTROL (0x93C0U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003))
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U)
+
+
+/*
+ Register group: RGX_CR_FWCORE_DMI_RESERVED4, with 7 repeats
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED4_REPEATCOUNT (7U)
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED40
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED40 (0x93C8U)
+#define RGX_CR_FWCORE_DMI_RESERVED40_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_RESERVED41
+*/
+#define RGX_CR_FWCORE_DMI_RESERVED41 (0x93D0U)
+#define RGX_CR_FWCORE_DMI_RESERVED41_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_DMSTATUS - FW core Debug Module status; field layout matches the RISC-V Debug Spec "dmstatus" register (ALL*/ANY* hart state summary, VERSION in bits 0-3)
+*/
+#define RGX_CR_FWCORE_DMI_DMSTATUS (0x9400U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF))
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_ABSTRACTCS - abstract command control/status; mirrors the RISC-V Debug Spec "abstractcs" register (PROGBUFSIZE bits 24-28, BUSY bit 12, CMDERR bits 8-10, DATACOUNT bits 0-3)
+*/
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x9540U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F))
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_COMMAND - abstract command register; CMDTYPE in bits 24-31, command-specific CONTROL in bits 0-23 (RISC-V Debug Spec "command")
+*/
+#define RGX_CR_FWCORE_DMI_COMMAND (0x9580U)
+#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U)
+#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU)
+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U)
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBCS - system bus access control/status; mirrors the RISC-V Debug Spec "sbcs" register (SBVERSION bits 29-31, SBACCESS width select bits 17-19, SBERROR bits 12-14, SBASIZE bits 5-11, supported-width flags bits 0-4)
+*/
+#define RGX_CR_FWCORE_DMI_SBCS (0x9DC0U)
+#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF))
+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBADDRESS0
+*/
+#define RGX_CR_FWCORE_DMI_SBADDRESS0 (0x9E00U)
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBDATA0
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA0 (0x9EC0U)
+#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBDATA1
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA1 (0x9F00U)
+#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBDATA2
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA2 (0x9F40U)
+#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_SBDATA3
+*/
+#define RGX_CR_FWCORE_DMI_SBDATA3 (0x9F80U)
+#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_FWCORE_DMI_HALTSUM0
+*/
+#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x9FC0U)
+#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U)
+#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE - MTS kick/schedule register; per-instance copies RGX_CR_MTS_SCHEDULE1..7 follow at 0x10000-byte strides (0x10B00..0x70B00)
+*/
+#define RGX_CR_MTS_SCHEDULE (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1 (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2 (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3 (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4 (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5 (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6 (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7 (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0x00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S8_CPR__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (1U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (0x00000002U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (0x00000000U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (0x00000001U)
+
+
+/*
+ Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U)
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U)
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U)
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U)
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U)
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U)
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM6_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE (0xD138U)
+#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM6_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_DM7_INTERRUPT_ENABLE
+*/
+#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE (0xD140U)
+#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U)
+#define RGX_CR_MTS_DM7_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FC0FFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_IRQ_OS0_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD0U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS0_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE0U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS1_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD0U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS1_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE0U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS2_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD0U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS2_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE0U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS3_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD0U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS3_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE0U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS4_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD0U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS4_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE0U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS5_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD0U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS5_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE0U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS6_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD0U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS6_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE0U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS7_EVENT_STATUS
+*/
+#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD0U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_IRQ_OS7_EVENT_CLEAR
+*/
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE0U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U)
+#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE_ENABLE
+*/
+#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BD8U)
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_FWCORE_BOOT
+*/
+#define RGX_CR_FWCORE_BOOT (0x70C0U)
+#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_BOOT_MODE_SHIFT (0U)
+#define RGX_CR_FWCORE_BOOT_MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_BOOT_MODE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER (0x0F08U)
+#define RGX_CR_ISP_RENDER__IFR_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000003FFFF))
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__MASKFULL (IMG_UINT64_C(0x000000000007FF53))
+#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000003FFF0))
+#define RGX_CR_ISP_RENDER_TILES_PER_ISP_SHIFT (16U)
+#define RGX_CR_ISP_RENDER_TILES_PER_ISP_CLRMSK (0xFFFCFFFFU)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__TILES_PER_ISP_SHIFT (16U)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__TILES_PER_ISP_CLRMSK (0xFFF8FFFFU)
+#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_SHIFT (12U)
+#define RGX_CR_ISP_RENDER_TILE_LIMIT_HIGH_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_SHIFT (8U)
+#define RGX_CR_ISP_RENDER_TILE_LIMIT_LOW_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_ISP_RENDER_TILE_STARVATION_SHIFT (7U)
+#define RGX_CR_ISP_RENDER_TILE_STARVATION_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_ISP_RENDER_TILE_STARVATION_EN (0x00000080U)
+#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_SHIFT (6U)
+#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_ISP_RENDER_PROCESS_EMPTY_TILES_EN (0x00000040U)
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFCFU)
+#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_NONE (0x00000000U)
+#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_TILE (0x00000010U)
+#define RGX_CR_ISP_RENDER_RESUME_CONTEXT_PBLK (0x00000030U)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_SHIFT (4U)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CONTEXT_NONE (0x00000000U)
+#define RGX_CR_ISP_RENDER__CTX_SWITCH_LVL4_AND_PDM_EQ0__RESUME_CONTEXT_RESUME (0x00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (1U)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000002U)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (0U)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL (0x0FB0U)
+#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000007BF8FF))
+#define RGX_CR_ISP_CTL_DBUFFER_COUNT_SHIFT (20U)
+#define RGX_CR_ISP_CTL_DBUFFER_COUNT_CLRMSK (0xFF8FFFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U)
+#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_SHIFT (17U)
+#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_ISP_CTL_UPFRONT_DEPTH_DISABLE_EN (0x00020000U)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_SHIFT (16U)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ONE_EN (0x00010000U)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_SHIFT (15U)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_ISP_CTL_DEPTH_CLAMP_ZERO_EN (0x00008000U)
+#define RGX_CR_ISP_CTL_LINE_SAMPLE_SHIFT (14U)
+#define RGX_CR_ISP_CTL_LINE_SAMPLE_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_ISP_CTL_LINE_SAMPLE_EN (0x00004000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (13U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x00002000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_SHIFT (12U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_SINGLE_PIXEL_EN (0x00001000U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (11U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00000800U)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register group: RGX_CR_MEM_TILING_CFG, with 8 repeats
+*/
+#define RGX_CR_MEM_TILING_CFG_REPEATCOUNT (8U)
+/*
+ Register RGX_CR_MEM_TILING_CFG0
+*/
+#define RGX_CR_MEM_TILING_CFG0 (0x12D8U)
+#define RGX_CR_MEM_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG0_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG1
+*/
+#define RGX_CR_MEM_TILING_CFG1 (0x12E0U)
+#define RGX_CR_MEM_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG1_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG2
+*/
+#define RGX_CR_MEM_TILING_CFG2 (0x12E8U)
+#define RGX_CR_MEM_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG2_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG3
+*/
+#define RGX_CR_MEM_TILING_CFG3 (0x12F0U)
+#define RGX_CR_MEM_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG3_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG4
+*/
+#define RGX_CR_MEM_TILING_CFG4 (0x12F8U)
+#define RGX_CR_MEM_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG4_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG5
+*/
+#define RGX_CR_MEM_TILING_CFG5 (0x1300U)
+#define RGX_CR_MEM_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG5_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG6
+*/
+#define RGX_CR_MEM_TILING_CFG6 (0x1308U)
+#define RGX_CR_MEM_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG6_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MEM_TILING_CFG7
+*/
+#define RGX_CR_MEM_TILING_CFG7 (0x1310U)
+#define RGX_CR_MEM_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
+#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_SHIFT (61U)
+#define RGX_CR_MEM_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG7_ENABLE_SHIFT (60U)
+#define RGX_CR_MEM_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_SHIFT (32U)
+#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
+#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U)
+#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_SHIFT (0U)
+#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U)
+#define RGX_CR_MEM_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_USC_TIMER
+*/
+#define RGX_CR_USC_TIMER (0x46C8U)
+#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_USC_TIMER_CNT_SHIFT (0U)
+#define RGX_CR_USC_TIMER_CNT_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_USC_TIMER_CNT
+*/
+#define RGX_CR_USC_TIMER_CNT (0x46D0U)
+#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U)
+#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_TE_TMA_CHECKSUM
+*/
+#define RGX_CR_TE_TMA_CHECKSUM (0x5128U)
+#define RGX_CR_TE_TMA_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_TMA_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TE_TMA_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_CDM_PDS_CHECKSUM
+*/
+#define RGX_CR_CDM_PDS_CHECKSUM (0x5130U)
+#define RGX_CR_CDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_CDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_TFPU_CHECKSUM
+*/
+#define RGX_CR_TFPU_CHECKSUM (0x5048U)
+#define RGX_CR_TFPU_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TFPU_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_ZLS_CHECKSUM
+*/
+#define RGX_CR_ZLS_CHECKSUM (0x5050U)
+#define RGX_CR_ZLS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ZLS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ZLS_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PBE_CHECKSUM_3D
+*/
+#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_SHIFT (0U)
+#define RGX_CR_PBE_CHECKSUM_3D__PBE_CHECKSUM_2D__VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PDS_DOUTM_STM_CHECKSUM
+*/
+#define RGX_CR_PDS_DOUTM_STM_CHECKSUM (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PDS_DOUTM_STM_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_VCE_PRIM_CHECKSUM
+*/
+#define RGX_CR_VCE_PRIM_CHECKSUM (0x5140U)
+#define RGX_CR_VCE_PRIM_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_VCE_PRIM_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_TDM_PDS_CHECKSUM
+*/
+#define RGX_CR_TDM_PDS_CHECKSUM (0x5148U)
+#define RGX_CR_TDM_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TDM_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PBE_CHECKSUM_2D
+*/
+#define RGX_CR_PBE_CHECKSUM_2D (0x5158U)
+#define RGX_CR_PBE_CHECKSUM_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_2D_VALUE_SHIFT (0U)
+#define RGX_CR_PBE_CHECKSUM_2D_VALUE_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_PHASE_GEOM
+*/
+#define RGX_CR_PERF_PHASE_GEOM (0x6008U)
+#define RGX_CR_PERF_PHASE_GEOM__PDM_V1__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_GEOM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_GEOM_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_PHASE_GEOM_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_PHASE_FRAG
+*/
+#define RGX_CR_PERF_PHASE_FRAG (0x6010U)
+#define RGX_CR_PERF_PHASE_FRAG__PDM_V1__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_FRAG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_FRAG_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_PHASE_FRAG_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_PHASE_COMP
+*/
+#define RGX_CR_PERF_PHASE_COMP (0x6018U)
+#define RGX_CR_PERF_PHASE_COMP__PDM_V1__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_COMP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_COMP_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_PHASE_COMP_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_GEOM_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_GEOM_TOTAL (0x6020U)
+#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_GEOM_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_FRAG_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_FRAG_TOTAL (0x6028U)
+#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_COMP_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_COMP_TOTAL (0x6030U)
+#define RGX_CR_PERF_CYCLE_COMP_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_COMP_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL (0x6038U)
+#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_GEOM_OR_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_PHASE_2D
+*/
+#define RGX_CR_PERF_PHASE_2D (0x6050U)
+#define RGX_CR_PERF_PHASE_2D__PDM_V1__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_2D_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_PHASE_2D_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_PHASE_2D_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_2D_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_2D_TOTAL (0x6058U)
+#define RGX_CR_PERF_CYCLE_2D_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_2D_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL
+*/
+#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL (0x6408U)
+#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_CYCLE_GEOM_AND_FRAG_TOTAL_COUNT_CLRMSK (0x00000000U)
+
+
+/*
+ Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U)
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x7020U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x7028U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x7030U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x7038U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x7040U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x7048U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x7050U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x7058U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x7060U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x7068U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x7070U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x7078U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x7080U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x7088U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x7090U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15
+*/
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x7098U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x3FFFF0FFFFFFF0FF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_SHIFT (0U)
+#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MMU_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FWCORE_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE (0x70A0U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_SHIFT (0U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WRAPPER_NMI_ENABLE_EVENT_EN_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x70A8U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_WRAPPER_RESET_VECTOR
+*/
+#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR (0x70B0U)
+#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE))
+#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_SHIFT (1U)
+#define RGX_CR_FWCORE_WRAPPER_RESET_VECTOR_ADDR_CLRMSK (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_WRAPPER_NMI_VECTOR
+*/
+#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR (0x70B8U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE))
+#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_SHIFT (1U)
+#define RGX_CR_FWCORE_WRAPPER_NMI_VECTOR_ADDR_CLRMSK (0x00000001U)
+
+
+/*
+ Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x000000000007FEFF))
+#define RGX_CR_JONES_IDLE_ASC_SHIFT (18U)
+#define RGX_CR_JONES_IDLE_ASC_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_JONES_IDLE_ASC_EN (0x00040000U)
+#define RGX_CR_JONES_IDLE_RCE_SHIFT (17U)
+#define RGX_CR_JONES_IDLE_RCE_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_JONES_IDLE_RCE_EN (0x00020000U)
+#define RGX_CR_JONES_IDLE_AXI2IMG_SHIFT (16U)
+#define RGX_CR_JONES_IDLE_AXI2IMG_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_JONES_IDLE_AXI2IMG_EN (0x00010000U)
+#define RGX_CR_JONES_IDLE_SLC_SHIFT (15U)
+#define RGX_CR_JONES_IDLE_SLC_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_JONES_IDLE_SLC_EN (0x00008000U)
+#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U)
+#define RGX_CR_JONES_IDLE_DFU_SHIFT (10U)
+#define RGX_CR_JONES_IDLE_DFU_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_DFU_EN (0x00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U)
+#define RGX_CR_JONES_IDLE_USC_GMUTEX_SHIFT (4U)
+#define RGX_CR_JONES_IDLE_USC_GMUTEX_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USC_GMUTEX_EN (0x00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U)
+#define RGX_CR_JONES_IDLE_DCE_SHIFT (1U)
+#define RGX_CR_JONES_IDLE_DCE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_DCE_EN (0x00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SYS_BUS_SECURE_RESET__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6 (0xF0000U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_6__ID_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4 (0xF0000U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_CLRMSK (0xFFFFFFE0U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6 (0xF0000U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_GT6__ID_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4__ID_CLRMSK (0xFFFFFFE0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_V1_AND_MH_PASID_WIDTH_6__ID_CLRMSK (0xFFFFFFC0U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1 (0xF0008U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_SHIFT (28U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN (0x10000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU (0xF0008U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_SHIFT (28U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__INVALID_EN (0x10000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_CLRMSK (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1_AND_VPU__BASE_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING__VPU (0x1E010U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_SHIFT (28U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__INVALID_EN (0x10000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_CLRMSK (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING__VPU__BASE_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000001FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_INVALID_SHIFT (28U)
+#define RGX_CR_MMU_CBASE_MAPPING_INVALID_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MMU_CBASE_MAPPING_INVALID_EN (0x10000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS1
+*/
+#define RGX_CR_MMU_FAULT_STATUS1 (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_SHIFT (62U)
+#define RGX_CR_MMU_FAULT_STATUS1_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_SHIFT (56U)
+#define RGX_CR_MMU_FAULT_STATUS1_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT (48U)
+#define RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS1_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F))
+#define RGX_CR_MMU_FAULT_STATUS1_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS1_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS1_RNW_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS1_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS1_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS1_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS1_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS1_FAULT_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS2
+*/
+#define RGX_CR_MMU_FAULT_STATUS2 (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__MASKFULL (IMG_UINT64_C(0x00000000003FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS2_MASKFULL (IMG_UINT64_C(0x000000003FFF07FF))
+#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_SHIFT (29U)
+#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_WRITEBACK_EN (0x20000000U)
+#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_SHIFT (28U)
+#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_CLEANUNIQUE_EN (0x10000000U)
+#define RGX_CR_MMU_FAULT_STATUS2_BANK_SHIFT (24U)
+#define RGX_CR_MMU_FAULT_STATUS2_BANK_CLRMSK (0xF0FFFFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_SHIFT (21U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__FBM_FAULT_EN (0x00200000U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_SHIFT (20U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__WRITEBACK_EN (0x00100000U)
+#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_SHIFT (16U)
+#define RGX_CR_MMU_FAULT_STATUS2_TLB_ENTRY_CLRMSK (0xFF00FFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BIF_ID_CLRMSK (0xFFF00FFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_SHIFT (10U)
+#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_FBM_FAULT_EN (0x00000400U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_SHIFT (8U)
+#define RGX_CR_MMU_FAULT_STATUS2__ALBTOP__BANK_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS2_BIF_ID_CLRMSK (0xFFFFFC00U)
+#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS2_ACTIVE_ID_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META (0xE160U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (62U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (56U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xC0FFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (48U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFF00FFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFF00000000000F))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS2_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS2_META (0xE198U)
+#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__MASKFULL (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_MMU_FAULT_STATUS2_META_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_SHIFT (13U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_META_WRITEBACK_EN (0x00002000U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_META_CLEANUNIQUE_EN (0x00001000U)
+#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_MMU_FAULT_STATUS2_META__ALBTOP__WRITEBACK_EN (0x00001000U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_SHIFT (8U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_BANK_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_TLB_ENTRY_CLRMSK (0xFFFFFF00U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS2_META_ACTIVE_ID_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MMU_FAULT_STATUS_PM
+*/
+#define RGX_CR_MMU_FAULT_STATUS_PM (0xE130U)
+#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_PM_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_PM_DM_SHIFT (24U)
+#define RGX_CR_MMU_FAULT_STATUS_PM_DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_SHIFT (24U)
+#define RGX_CR_MMU_FAULT_STATUS_PM__PM_RECYCLE__DM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0FFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_SHIFT (23U)
+#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_PM_RNW_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_PM_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF800007))
+#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_PM_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_PM_FAULT_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_MMU_STATUS
+*/
+#define RGX_CR_MMU_STATUS__VPU (0x10288U)
+#define RGX_CR_MMU_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_SHIFT (40U)
+#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__VPU__MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_SHIFT (38U)
+#define RGX_CR_MMU_STATUS__VPU__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF))
+#define RGX_CR_MMU_STATUS__VPU__PM_READS_SHIFT (36U)
+#define RGX_CR_MMU_STATUS__VPU__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__VPU__PC_READS_SHIFT (24U)
+#define RGX_CR_MMU_STATUS__VPU__PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF))
+#define RGX_CR_MMU_STATUS__VPU__PD_READS_SHIFT (12U)
+#define RGX_CR_MMU_STATUS__VPU__PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF))
+#define RGX_CR_MMU_STATUS__VPU__PT_READS_SHIFT (0U)
+#define RGX_CR_MMU_STATUS__VPU__PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000))
+
+
+/*
+ Register RGX_CR_MMU_STATUS
+*/
+#define RGX_CR_MMU_STATUS (0xE170U)
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MASKFULL (IMG_UINT64_C(0x000003FFFFFFFFFF))
+#define RGX_CR_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_SHIFT (41U)
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__MMU_STALLED_EN (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MMU_STATUS_MMU_STALLED_SHIFT (40U)
+#define RGX_CR_MMU_STATUS_MMU_STALLED_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_MMU_STATUS_MMU_STALLED_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_SHIFT (39U)
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFE7FFFFFFFFF))
+#define RGX_CR_MMU_STATUS_PM_WRITES_SHIFT (38U)
+#define RGX_CR_MMU_STATUS_PM_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFF3FFFFFFFFF))
+#define RGX_CR_MMU_STATUS_PM_READS_SHIFT (36U)
+#define RGX_CR_MMU_STATUS_PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF))
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_SHIFT (36U)
+#define RGX_CR_MMU_STATUS__CATURIX_TOP_INFRASTRUCTURE__PM_READS_CLRMSK (IMG_UINT64_C(0xFFFFFF8FFFFFFFFF))
+#define RGX_CR_MMU_STATUS_PC_READS_SHIFT (24U)
+#define RGX_CR_MMU_STATUS_PC_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF))
+#define RGX_CR_MMU_STATUS_PD_READS_SHIFT (12U)
+#define RGX_CR_MMU_STATUS_PD_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF))
+#define RGX_CR_MMU_STATUS_PT_READS_SHIFT (0U)
+#define RGX_CR_MMU_STATUS_PT_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000))
+
+
+/*
+ Register RGX_CR_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_MMU_ENTRY_STATUS__VPU (0x1E028U)
+#define RGX_CR_MMU_ENTRY_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF))
+#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_SHIFT (15U)
+#define RGX_CR_MMU_ENTRY_STATUS__VPU__ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF))
+#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_SHIFT (0U)
+#define RGX_CR_MMU_ENTRY_STATUS__VPU__CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_MMU_ENTRY_STATUS (0xE178U)
+#define RGX_CR_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFF80FF))
+#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_SHIFT (15U)
+#define RGX_CR_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000007FFF))
+#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_SHIFT (0U)
+#define RGX_CR_MMU_ENTRY_STATUS_CONTEXT_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_MMU_ENTRY
+*/
+#define RGX_CR_MMU_ENTRY__VPU (0x1E030U)
+#define RGX_CR_MMU_ENTRY__VPU__MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_MMU_ENTRY__VPU__ENABLE_SHIFT (1U)
+#define RGX_CR_MMU_ENTRY__VPU__ENABLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MMU_ENTRY__VPU__ENABLE_EN (0x00000002U)
+#define RGX_CR_MMU_ENTRY__VPU__PENDING_SHIFT (0U)
+#define RGX_CR_MMU_ENTRY__VPU__PENDING_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MMU_ENTRY__VPU__PENDING_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MMU_ENTRY
+*/
+#define RGX_CR_MMU_ENTRY (0xE180U)
+#define RGX_CR_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_MMU_ENTRY_ENABLE_SHIFT (1U)
+#define RGX_CR_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_MMU_ENTRY_ENABLE_EN (0x00000002U)
+#define RGX_CR_MMU_ENTRY_PENDING_SHIFT (0U)
+#define RGX_CR_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_MMU_ENTRY_PENDING_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MMU_PAGE_SIZE_RANGE_ONE
+*/
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE (0xE350U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_SHIFT (38U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_SHIFT (19U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_END_ADDR_ALIGNSIZE (2097152U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_ONE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+ Register RGX_CR_MMU_PAGE_SIZE_RANGE_TWO
+*/
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO (0xE358U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_SHIFT (38U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_SHIFT (19U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_END_ADDR_ALIGNSIZE (2097152U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_TWO_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+ Register RGX_CR_MMU_PAGE_SIZE_RANGE_THREE
+*/
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE (0xE360U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_SHIFT (38U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_SHIFT (19U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_END_ADDR_ALIGNSIZE (2097152U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_THREE_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+ Register RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR
+*/
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR (0xE368U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_MASKFULL (IMG_UINT64_C(0x000001FFFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_SHIFT (38U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFE3FFFFFFFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_SHIFT (19U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFC00007FFFF))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_END_ADDR_ALIGNSIZE (2097152U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF80000))
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSHIFT (21U)
+#define RGX_CR_MMU_PAGE_SIZE_RANGE_FOUR_BASE_ADDR_ALIGNSIZE (2097152U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1 (0xE210U)
+#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_SHIFT (48U)
+#define RGX_CR_SLC_STATUS1_XBAR_CFI_TIMEOUTS_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_SHIFT (36U)
+#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_SHIFT (24U)
+#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF))
+#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_SHIFT (12U)
+#define RGX_CR_SLC_STATUS1_BUS1_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF))
+#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_SHIFT (0U)
+#define RGX_CR_SLC_STATUS1_BUS0_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000))
+
+
+/*
+ Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2 (0xE218U)
+#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT (48U)
+#define RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_SHIFT (36U)
+#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFF000FFFFFFFFF))
+#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_SHIFT (24U)
+#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_WRITES_CLRMSK (IMG_UINT64_C(0xFFFFFFF000FFFFFF))
+#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_SHIFT (12U)
+#define RGX_CR_SLC_STATUS2_BUS3_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF000FFF))
+#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_SHIFT (0U)
+#define RGX_CR_SLC_STATUS2_BUS2_OUTSTANDING_READS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF000))
+
+
+/*
+ Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE (0xE230U)
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x0000000001FFFFFF))
+#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_SHIFT (24U)
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__FBCDC_ARB_EN (0x01000000U)
+#define RGX_CR_SLC_IDLE_FBCDC_ARB_SHIFT (20U)
+#define RGX_CR_SLC_IDLE_FBCDC_ARB_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_SLC_IDLE_FBCDC_ARB_EN (0x00100000U)
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_SHIFT (20U)
+#define RGX_CR_SLC_IDLE__COHERENCY_AND_ALRIF_GT0__OWDB_CLRMSK (0xFF0FFFFFU)
+#define RGX_CR_SLC_IDLE_OWDB_SHIFT (16U)
+#define RGX_CR_SLC_IDLE_OWDB_CLRMSK (0xFFF0FFFFU)
+#define RGX_CR_SLC_IDLE_ACE_CLBS_SHIFT (16U)
+#define RGX_CR_SLC_IDLE_ACE_CLBS_CLRMSK (0xFFF0FFFFU)
+#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_SHIFT (12U)
+#define RGX_CR_SLC_IDLE_ACE_CONVERTERS_CLRMSK (0xFFFF0FFFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (4U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU)
+#define RGX_CR_SLC_IDLE_MMU_SHIFT (3U)
+#define RGX_CR_SLC_IDLE_MMU_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_MMU_EN (0x00000008U)
+#define RGX_CR_SLC_IDLE_CCM_SHIFT (2U)
+#define RGX_CR_SLC_IDLE_CCM_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_CCM_EN (0x00000004U)
+#define RGX_CR_SLC_IDLE_RDI_SHIFT (1U)
+#define RGX_CR_SLC_IDLE_RDI_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_RDI_EN (0x00000002U)
+#define RGX_CR_SLC_IDLE_XBAR_SHIFT (0U)
+#define RGX_CR_SLC_IDLE_XBAR_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_XBAR_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_SLC_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU (0x1E240U)
+#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__MASKFULL (IMG_UINT64_C(0x000000000001FFFF))
+#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_SHIFT (0U)
+#define RGX_CR_SLC_FAULT_STOP_STATUS__VPU__BIF_CLRMSK (0xFFFE0000U)
+
+
+/*
+ Register RGX_CR_SLC_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC_FAULT_STOP_STATUS (0xE240U)
+#define RGX_CR_SLC_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x000000000001FFFF))
+#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_SHIFT (0U)
+#define RGX_CR_SLC_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFE0000U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS_DEBUG
+*/
+#define RGX_CR_SLC_STATUS_DEBUG__VPU (0x1E260U)
+#define RGX_CR_SLC_STATUS_DEBUG__VPU__MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_SHIFT (16U)
+#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_COH_REQ_CLRMSK (0x0000FFFFU)
+#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_SHIFT (0U)
+#define RGX_CR_SLC_STATUS_DEBUG__VPU__ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS_DEBUG
+*/
+#define RGX_CR_SLC_STATUS_DEBUG (0xE260U)
+#define RGX_CR_SLC_STATUS_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_SHIFT (16U)
+#define RGX_CR_SLC_STATUS_DEBUG_ERR_COH_REQ_CLRMSK (0x0000FFFFU)
+#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_SHIFT (0U)
+#define RGX_CR_SLC_STATUS_DEBUG_ERR_ADDR_ALIAS_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_HMMU_OSID_PAGE_SIZE
+*/
+#define RGX_CR_HMMU_OSID_PAGE_SIZE (0x80000U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_MASKFULL (IMG_UINT64_C(0x0000000077777777))
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_SHIFT (28U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_7_CLRMSK (0x8FFFFFFFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_SHIFT (24U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_6_CLRMSK (0xF8FFFFFFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_SHIFT (20U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_5_CLRMSK (0xFF8FFFFFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_SHIFT (16U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_4_CLRMSK (0xFFF8FFFFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_SHIFT (12U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_3_CLRMSK (0xFFFF8FFFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_SHIFT (8U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_2_CLRMSK (0xFFFFF8FFU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_SHIFT (4U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_1_CLRMSK (0xFFFFFF8FU)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_SHIFT (0U)
+#define RGX_CR_HMMU_OSID_PAGE_SIZE_OSID_0_CLRMSK (0xFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_HMMU_BYPASS
+*/
+#define RGX_CR_HMMU_BYPASS (0x80008U)
+#define RGX_CR_HMMU_BYPASS_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_HMMU_BYPASS_EN_SHIFT (0U)
+#define RGX_CR_HMMU_BYPASS_EN_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_HMMU_INVAL
+*/
+#define RGX_CR_HMMU_INVAL (0x80010U)
+#define RGX_CR_HMMU_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_HMMU_INVAL_OS_ID_SHIFT (4U)
+#define RGX_CR_HMMU_INVAL_OS_ID_CLRMSK (0xFFFFFF8FU)
+#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_SHIFT (3U)
+#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_HMMU_INVAL_ALL_OS_IDS_EN (0x00000008U)
+#define RGX_CR_HMMU_INVAL_HPC_SHIFT (2U)
+#define RGX_CR_HMMU_INVAL_HPC_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_HMMU_INVAL_HPC_EN (0x00000004U)
+#define RGX_CR_HMMU_INVAL_HPD_SHIFT (1U)
+#define RGX_CR_HMMU_INVAL_HPD_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_HMMU_INVAL_HPD_EN (0x00000002U)
+#define RGX_CR_HMMU_INVAL_HPT_SHIFT (0U)
+#define RGX_CR_HMMU_INVAL_HPT_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_HMMU_INVAL_HPT_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_HMMU_HPC_BASE_MAPPING0
+*/
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0 (0x80018U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_SHIFT (36U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR1_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_SHIFT (32U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID1_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_SHIFT (4U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_ADDR0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_SHIFT (0U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING0_VALID0_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_HMMU_HPC_BASE_MAPPING1
+*/
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1 (0x80020U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_SHIFT (36U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR3_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_SHIFT (32U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID3_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_SHIFT (4U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_ADDR2_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_SHIFT (0U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING1_VALID2_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_HMMU_HPC_BASE_MAPPING2
+*/
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2 (0x80028U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_SHIFT (36U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR5_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_SHIFT (32U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID5_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_SHIFT (4U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_ADDR4_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_SHIFT (0U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING2_VALID4_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_HMMU_HPC_BASE_MAPPING3
+*/
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3 (0x80030U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_MASKFULL (IMG_UINT64_C(0xFFFFFFF1FFFFFFF1))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_SHIFT (36U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR7_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_SHIFT (32U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID7_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_SHIFT (4U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_ADDR6_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_SHIFT (0U)
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_HPC_BASE_MAPPING3_VALID6_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register group: RGX_CR_HMMU_PAGE_FAULT_INFO, with 8 repeats
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO_REPEATCOUNT (8U)
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO0
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0 (0x80038U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO1
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1 (0x80040U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO2
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2 (0x80048U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO3
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3 (0x80050U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO4
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4 (0x80058U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO5
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5 (0x80060U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO6
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6 (0x80068U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PAGE_FAULT_INFO7
+*/
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7 (0x80070U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PAGE_FAULT_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register group: RGX_CR_HMMU_PENDING_ENTRY_INFO, with 8 repeats (one 64-bit register per OSID 0-7, 8-byte stride from 0x800C0; same ADDR/LEVEL layout as RGX_CR_HMMU_PAGE_FAULT_INFO)
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO_REPEATCOUNT (8U)
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO0
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0 (0x800C0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO0_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO1
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1 (0x800C8U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO1_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO2
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2 (0x800D0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO2_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO3
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3 (0x800D8U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO3_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO4
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4 (0x800E0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO4_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO5
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5 (0x800E8U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO5_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO6
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6 (0x800F0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO6_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY_INFO7
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7 (0x800F8U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_SHIFT (2U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC0000003))
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_INFO7_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_HMMU_HOST_IRQ_ENABLE: per-source enable bits for the four HMMU host interrupt causes (READONLY_FAULT, READONLY_FAULT_PM, PAGE_FAULT, PENDING_ENTRY), one bit each in bits 3:0.
+*/
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE (0x80100U)
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_SHIFT (3U)
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_SHIFT (2U)
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_READONLY_FAULT_PM_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_SHIFT (1U)
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PAGE_FAULT_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_SHIFT (0U)
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_HOST_IRQ_ENABLE_PENDING_ENTRY_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_HMMU_PENDING_ENTRY: single one-bit ENABLE field (bit 0). Note the CLRMSK/EN constants here are 32-bit literals, unlike the IMG_UINT64_C masks used by neighbouring registers.
+*/
+#define RGX_CR_HMMU_PENDING_ENTRY (0x80108U)
+#define RGX_CR_HMMU_PENDING_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_SHIFT (0U)
+#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_HMMU_PENDING_ENTRY_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_HMMU_FAULT_STATUS: one status bit per OSID (0-7) for each of four fault classes - READONLY_FAULT_PM in bits 31:24, READONLY_FAULT in bits 23:16, PENDING_ENTRY in bits 15:8, PAGE_FAULT in bits 7:0
+*/
+#define RGX_CR_HMMU_FAULT_STATUS (0x80120U)
+#define RGX_CR_HMMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_SHIFT (31U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID7_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_SHIFT (30U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID6_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_SHIFT (29U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID5_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_SHIFT (28U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID4_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_SHIFT (27U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID3_EN (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_SHIFT (26U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID2_EN (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_SHIFT (25U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID1_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_SHIFT (24U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_PM_OSID0_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_SHIFT (23U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_SHIFT (22U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_SHIFT (21U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_SHIFT (20U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_SHIFT (19U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_SHIFT (18U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_SHIFT (17U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_SHIFT (16U)
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_READONLY_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_SHIFT (15U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID7_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_SHIFT (14U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID6_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_SHIFT (13U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID5_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_SHIFT (12U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID4_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_SHIFT (11U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID3_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_SHIFT (10U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID2_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_SHIFT (9U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID1_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_SHIFT (8U)
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_HMMU_FAULT_STATUS_PENDING_ENTRY_OSID0_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_SHIFT (7U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID7_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_SHIFT (6U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID6_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_SHIFT (5U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID5_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_SHIFT (4U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID4_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_SHIFT (3U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID3_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_SHIFT (2U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID2_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_SHIFT (1U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID1_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_SHIFT (0U)
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_HMMU_FAULT_STATUS_PAGE_FAULT_OSID0_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register group: RGX_CR_HMMU_READONLY_FAULT_INFO, with 8 repeats (one per OSID 0-7, 8-byte stride from 0x80190; each holds a faulting ADDR field in bits 39:4)
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO_REPEATCOUNT (8U)
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO0
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO0 (0x80190U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO1
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO1 (0x80198U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO2
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO2 (0x801A0U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO3
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO3 (0x801A8U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO4
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO4 (0x801B0U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO5
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO5 (0x801B8U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO6
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO6 (0x801C0U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_INFO7
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_INFO7 (0x801C8U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_SHIFT (4U)
+#define RGX_CR_HMMU_READONLY_FAULT_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
+
+
+/*
+ Register group: RGX_CR_HMMU_READONLY_FAULT_PM_INFO, with 8 repeats (one per OSID 0-7, 8-byte stride from 0x801D0; each holds a 28-bit PM ADDR field in bits 27:0)
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO_REPEATCOUNT (8U)
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO0
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0 (0x801D0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO1
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1 (0x801D8U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO2
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2 (0x801E0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO3
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3 (0x801E8U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO4
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4 (0x801F0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO5
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5 (0x801F8U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO6
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6 (0x80200U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_HMMU_READONLY_FAULT_PM_INFO7
+*/
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7 (0x80208U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_SHIFT (0U)
+#define RGX_CR_HMMU_READONLY_FAULT_PM_INFO7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
+
+
+/*
+ Register RGX_CR_ACE_CTRL (__VPU variant at offset 0x1E320; distinct from the RGX_CR_ACE_CTRL definition at 0xE320, which additionally carries the DISABLE_EMPTY_BURST_REMOVAL field)
+*/
+#define RGX_CR_ACE_CTRL__VPU (0x1E320U)
+#define RGX_CR_ACE_CTRL__VPU__MASKFULL (IMG_UINT64_C(0x00000000007FCFFF))
+#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_SHIFT (19U)
+#define RGX_CR_ACE_CTRL__VPU__CLB_AXQOS_CLRMSK (0xFF87FFFFU)
+#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_SHIFT (15U)
+#define RGX_CR_ACE_CTRL__VPU__PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU)
+#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_ACE_CTRL__VPU__ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_SHIFT (8U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_SHIFT (4U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_CLRMSK (0xFFFFFF0FU)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_SHIFT (2U)
+#define RGX_CR_ACE_CTRL__VPU__MMU_DOMAIN_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_SHIFT (1U)
+#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_INNER_SHAREABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL__VPU__COH_DOMAIN_OUTER_SHAREABLE (0x00000002U)
+#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SHIFT (0U)
+#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL__VPU__NON_COH_DOMAIN_SYSTEM (0x00000001U)
+
+
+/*
+ Register RGX_CR_ACE_CTRL (offset 0xE320; a separate __VPU variant of this register is defined at 0x1E320 without the DISABLE_EMPTY_BURST_REMOVAL field)
+*/
+#define RGX_CR_ACE_CTRL (0xE320U)
+#define RGX_CR_ACE_CTRL_MASKFULL (IMG_UINT64_C(0x0000000000FFCFFF))
+#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_SHIFT (23U)
+#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_CLRMSK (0xFF7FFFFFU)
+#define RGX_CR_ACE_CTRL_DISABLE_EMPTY_BURST_REMOVAL_EN (0x00800000U)
+#define RGX_CR_ACE_CTRL_CLB_AXQOS_SHIFT (19U)
+#define RGX_CR_ACE_CTRL_CLB_AXQOS_CLRMSK (0xFF87FFFFU)
+#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_SHIFT (15U)
+#define RGX_CR_ACE_CTRL_PM_MMU_AXCACHE_CLRMSK (0xFFF87FFFU)
+#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_SHIFT (14U)
+#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_ACE_CTRL_ENABLE_NONSECURE_PROT_MATCH_EN (0x00004000U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_SHIFT (8U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_DEVICE_BUFFERABLE (0x00000100U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000200U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_NORMAL_NC_BUFFERABLE (0x00000300U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_NO_ALLOCATE (0x00000600U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_THROUGH_WRITE_ALLOCATE (0x00000E00U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_NO_ALLOCATE (0x00000700U)
+#define RGX_CR_ACE_CTRL_MMU_AWCACHE_WRITE_BACK_WRITE_ALLOCATE (0x00000F00U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_SHIFT (4U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_CLRMSK (0xFFFFFF0FU)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_NON_BUFFERABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_DEVICE_BUFFERABLE (0x00000010U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_NON_BUFFERABLE (0x00000020U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_NORMAL_NC_BUFFERABLE (0x00000030U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_NO_ALLOCATE (0x000000A0U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_THROUGH_READ_ALLOCATE (0x000000E0U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_NO_ALLOCATE (0x000000B0U)
+#define RGX_CR_ACE_CTRL_MMU_ARCACHE_WRITE_BACK_READ_ALLOCATE (0x000000F0U)
+#define RGX_CR_ACE_CTRL_MMU_DOMAIN_SHIFT (2U)
+#define RGX_CR_ACE_CTRL_MMU_DOMAIN_CLRMSK (0xFFFFFFF3U)
+#define RGX_CR_ACE_CTRL_COH_DOMAIN_SHIFT (1U)
+#define RGX_CR_ACE_CTRL_COH_DOMAIN_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_ACE_CTRL_COH_DOMAIN_INNER_SHAREABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL_COH_DOMAIN_OUTER_SHAREABLE (0x00000002U)
+#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SHIFT (0U)
+#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_NON_SHAREABLE (0x00000000U)
+#define RGX_CR_ACE_CTRL_NON_COH_DOMAIN_SYSTEM (0x00000001U)
+
+
<br>
+/*
+ Register RGX_CR_SOC_AXI: SoC AXI capability bits - 128-byte burst support for non-coherent (bit 3) and coherent (bit 2) traffic, plus a 2-bit COHERENCY_SUPPORT level (none / ACE-Lite / full ACE).
+*/
+#define RGX_CR_SOC_AXI (0xE338U)
+#define RGX_CR_SOC_AXI_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (3U)
+#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_SOC_AXI_NON_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000008U)
+#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_SHIFT (2U)
+#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_SOC_AXI_COHERENT_128_BYTE_BURST_SUPPORT_EN (0x00000004U)
+#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_SHIFT (0U)
+#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_CLRMSK (0xFFFFFFFCU)
+#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_NO_COHERENCY (0x00000000U)
+#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_ACE_LITE_COHERENCY (0x00000001U)
+#define RGX_CR_SOC_AXI_COHERENCY_SUPPORT_FULL_ACE_COHERENCY (0x00000002U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING0: four 8-bit context-mapping fields - 2D in bits 31:24, CDM in 23:16, 3D in 15:8, GEOM in 7:0.
+*/
+#define RGX_CR_CONTEXT_MAPPING0 (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING0_GEOM_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING0_GEOM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING2: 8-bit context-mapping fields for instance 0 - ALIST0 in bits 23:16, TE0 in 15:8, VCE0 in 7:0 (instance 1 is in RGX_CR_CONTEXT_MAPPING3).
+*/
+#define RGX_CR_CONTEXT_MAPPING2 (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING3: 8-bit context-mapping fields for instance 1 - ALIST1 in bits 23:16, TE1 in 15:8, VCE1 in 7:0 (same layout as RGX_CR_CONTEXT_MAPPING2).
+*/
+#define RGX_CR_CONTEXT_MAPPING3 (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_OUTSTANDING_READ: 16-bit COUNTER field (bits 15:0) of outstanding BIF reads.
+*/
+#define RGX_CR_BIF_OUTSTANDING_READ (0xF098U)
+#define RGX_CR_BIF_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_TEXAS1_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ (0xF0A0U)
+#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_TEXAS1_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_TEXAS0_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ (0xF0A8U)
+#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_TEXAS0_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_PFS: per-stage BIF idle/complete/stalling status bits (9 single-bit fields, bits 8:0). NOTE(review): the meaning of the "PFS" abbreviation is not stated in this file - presumably a fence/stall status used when quiescing the BIF; confirm against hardware documentation.
+*/
+#define RGX_CR_BIF_PFS (0xF0B0U)
+#define RGX_CR_BIF_PFS_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_BIF_PFS_SLC_STALLING_SHIFT (8U)
+#define RGX_CR_BIF_PFS_SLC_STALLING_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_BIF_PFS_SLC_STALLING_EN (0x00000100U)
+#define RGX_CR_BIF_PFS_TILING_IDLE_SHIFT (7U)
+#define RGX_CR_BIF_PFS_TILING_IDLE_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_BIF_PFS_TILING_IDLE_EN (0x00000080U)
+#define RGX_CR_BIF_PFS_ARB_IDLE_SHIFT (6U)
+#define RGX_CR_BIF_PFS_ARB_IDLE_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_BIF_PFS_ARB_IDLE_EN (0x00000040U)
+#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_SHIFT (5U)
+#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_BIF_PFS_RTN_MEM_IF_IDLE_EN (0x00000020U)
+#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_SHIFT (4U)
+#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_PFS_NONMETA_REQ_COMPLETE_EN (0x00000010U)
+#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_SHIFT (3U)
+#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_BIF_PFS_NONMETA_WDATA_COMPLETE_EN (0x00000008U)
+#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_SHIFT (2U)
+#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_PFS_WDATA_IP_IDLE_EN (0x00000004U)
+#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_SHIFT (1U)
+#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_BIF_PFS_REQUEST_IP_IDLE_EN (0x00000002U)
+#define RGX_CR_BIF_PFS_STALL_COMPLETE_SHIFT (0U)
+#define RGX_CR_BIF_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_BIF_PFS_STALL_COMPLETE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_BIF_TEXAS0_PFS
+*/
+#define RGX_CR_BIF_TEXAS0_PFS (0xF0B8U)
+#define RGX_CR_BIF_TEXAS0_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_SHIFT (6U)
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_INFLIGHT_EN (0x00000040U)
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_SHIFT (5U)
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_BIF_TEXAS0_PFS_JONES_CFI_IDLE_EN (0x00000020U)
+#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_SHIFT (4U)
+#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_TEXAS0_PFS_SLC_STALLING_EN (0x00000010U)
+#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_SHIFT (3U)
+#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_BIF_TEXAS0_PFS_TILING_IDLE_EN (0x00000008U)
+#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_SHIFT (2U)
+#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_TEXAS0_PFS_ARB_IDLE_EN (0x00000004U)
+#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_SHIFT (1U)
+#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_BIF_TEXAS0_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U)
+#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_SHIFT (0U)
+#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_BIF_TEXAS0_PFS_STALL_COMPLETE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_BIF_TEXAS1_PFS
+*/
+#define RGX_CR_BIF_TEXAS1_PFS (0xF0C8U)
+#define RGX_CR_BIF_TEXAS1_PFS_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_SHIFT (6U)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_PDSRW_EN (0x00000040U)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_SHIFT (5U)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_ACCUM_STALLING_MCU_EN (0x00000020U)
+#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_SHIFT (4U)
+#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_BIF_TEXAS1_PFS_SLC_STALLING_EN (0x00000010U)
+#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_SHIFT (3U)
+#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_BIF_TEXAS1_PFS_TILING_IDLE_EN (0x00000008U)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_SHIFT (2U)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_BIF_TEXAS1_PFS_ARB_IDLE_EN (0x00000004U)
+#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_SHIFT (1U)
+#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_BIF_TEXAS1_PFS_RTN_MEM_IF_IDLE_EN (0x00000002U)
+#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_SHIFT (0U)
+#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_BIF_TEXAS1_PFS_STALL_COMPLETE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_JONES_FIX
+*/
+#define RGX_CR_JONES_FIX__ROGUE3 (0xF0C0U)
+#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_SHIFT (0U)
+#define RGX_CR_JONES_FIX__ROGUE3__DISABLE_CLRMSK (0xFFFF0000U)
+
+
+/*
+ Register RGX_CR_FBCDC_STATUS
+*/
+#define RGX_CR_FBCDC_STATUS (0xF600U)
+#define RGX_CR_FBCDC_STATUS_MASKFULL (IMG_UINT64_C(0x000000000F0F0F0F))
+#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_SHIFT (24U)
+#define RGX_CR_FBCDC_STATUS_STATE_CHECK_FAIL_CLRMSK (0xF0FFFFFFU)
+#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_SHIFT (16U)
+#define RGX_CR_FBCDC_STATUS_UFOD_DECODER_ERROR_CLRMSK (0xFFF0FFFFU)
+#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_SHIFT (8U)
+#define RGX_CR_FBCDC_STATUS_HEADER_CHECK_FAIL_CLRMSK (0xFFFFF0FFU)
+#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_SHIFT (0U)
+#define RGX_CR_FBCDC_STATUS_TILE_LENGTH_CHECK_FAIL_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_FBCDC_SIGNATURE_STATUS
+*/
+#define RGX_CR_FBCDC_SIGNATURE_STATUS (0xF618U)
+#define RGX_CR_FBCDC_SIGNATURE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_SHIFT (4U)
+#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBM_ERROR_CLRMSK (0xFFFFFF0FU)
+#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_SHIFT (0U)
+#define RGX_CR_FBCDC_SIGNATURE_STATUS_FBDC_ERROR_CLRMSK (0xFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4 (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_FBCDC_IDLE
+*/
+#define RGX_CR_FBCDC_IDLE (0xF218U)
+#define RGX_CR_FBCDC_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_SHIFT (13U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF3_EN (0x00002000U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_SHIFT (12U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF3_EN (0x00001000U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_SHIFT (11U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF3_EN (0x00000800U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_SHIFT (10U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF2_EN (0x00000400U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_SHIFT (9U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF2_EN (0x00000200U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_SHIFT (8U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF2_EN (0x00000100U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_SHIFT (7U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF1_EN (0x00000080U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_SHIFT (6U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF1_EN (0x00000040U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_SHIFT (5U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF1_EN (0x00000020U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_SHIFT (4U)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_FBCDC_IDLE_FBC_MEMIF0_EN (0x00000010U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_SHIFT (3U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_FBCDC_IDLE_FBDC_MEMIF0_EN (0x00000008U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_SHIFT (2U)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_FBCDC_IDLE_FBM_MEMIF0_EN (0x00000004U)
+#define RGX_CR_FBCDC_IDLE_FBHC_SHIFT (1U)
+#define RGX_CR_FBCDC_IDLE_FBHC_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_FBCDC_IDLE_FBHC_EN (0x00000002U)
+#define RGX_CR_FBCDC_IDLE_FBSC_SHIFT (0U)
+#define RGX_CR_FBCDC_IDLE_FBSC_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FBCDC_IDLE_FBSC_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_MERCER_SOFT_RESET
+*/
+#define RGX_CR_MERCER_SOFT_RESET (0x0630U)
+#define RGX_CR_MERCER_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_SHIFT (62U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_SHIFT (61U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_SHIFT (60U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_SHIFT (59U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_SHIFT (58U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_SHIFT (57U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_SHIFT (56U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_SHIFT (55U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_SHIFT (54U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_SHIFT (53U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_SHIFT (52U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_SHIFT (51U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_SHIFT (50U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_SHIFT (49U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_SHIFT (48U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_SHIFT (47U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_SHIFT (46U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_SHIFT (45U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_SHIFT (44U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_SHIFT (43U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_SHIFT (42U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_SHIFT (41U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_SHIFT (40U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_SHIFT (39U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_SHIFT (38U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_SHIFT (37U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_SHIFT (36U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_SHIFT (35U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_SHIFT (34U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_SHIFT (33U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_SHIFT (32U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_SHIFT (31U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_SHIFT (30U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_SHIFT (29U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_SHIFT (28U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_SHIFT (27U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_SHIFT (26U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_SHIFT (25U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_SHIFT (24U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_SHIFT (23U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_SHIFT (22U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_SHIFT (21U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_SHIFT (20U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_SHIFT (19U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_SHIFT (18U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_SHIFT (17U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_SHIFT (16U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_SHIFT (15U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_SHIFT (14U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_SHIFT (13U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_SHIFT (12U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_SHIFT (11U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_SHIFT (10U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_SHIFT (9U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_SHIFT (8U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_SHIFT (7U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_SHIFT (6U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_SHIFT (5U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_SHIFT (4U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_SHIFT (3U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_SHIFT (2U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_SHIFT (1U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_SHIFT (0U)
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_TEXAS_SOFT_RESET
+*/
+#define RGX_CR_TEXAS_SOFT_RESET (0x0640U)
+#define RGX_CR_TEXAS_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_SOFT_RESET_SPU31_SHIFT (31U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU31_CLRMSK (0x7FFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU31_EN (0x80000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU30_SHIFT (30U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU30_CLRMSK (0xBFFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU30_EN (0x40000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU29_SHIFT (29U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU29_CLRMSK (0xDFFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU29_EN (0x20000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU28_SHIFT (28U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU28_CLRMSK (0xEFFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU28_EN (0x10000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU27_SHIFT (27U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU27_CLRMSK (0xF7FFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU27_EN (0x08000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU26_SHIFT (26U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU26_CLRMSK (0xFBFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU26_EN (0x04000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU25_SHIFT (25U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU25_CLRMSK (0xFDFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU25_EN (0x02000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU24_SHIFT (24U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU24_CLRMSK (0xFEFFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU24_EN (0x01000000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU23_SHIFT (23U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU23_CLRMSK (0xFF7FFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU23_EN (0x00800000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU22_SHIFT (22U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU22_CLRMSK (0xFFBFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU22_EN (0x00400000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU21_SHIFT (21U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU21_CLRMSK (0xFFDFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU21_EN (0x00200000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU20_SHIFT (20U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU20_CLRMSK (0xFFEFFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU20_EN (0x00100000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU19_SHIFT (19U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU19_CLRMSK (0xFFF7FFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU19_EN (0x00080000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU18_SHIFT (18U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU18_CLRMSK (0xFFFBFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU18_EN (0x00040000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU17_SHIFT (17U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU17_CLRMSK (0xFFFDFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU17_EN (0x00020000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU16_SHIFT (16U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU16_CLRMSK (0xFFFEFFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU16_EN (0x00010000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU15_SHIFT (15U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU15_CLRMSK (0xFFFF7FFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU15_EN (0x00008000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU14_SHIFT (14U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU14_CLRMSK (0xFFFFBFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU14_EN (0x00004000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU13_SHIFT (13U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU13_CLRMSK (0xFFFFDFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU13_EN (0x00002000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU12_SHIFT (12U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU12_CLRMSK (0xFFFFEFFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU12_EN (0x00001000U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU11_SHIFT (11U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU11_CLRMSK (0xFFFFF7FFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU11_EN (0x00000800U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU10_SHIFT (10U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU10_CLRMSK (0xFFFFFBFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU10_EN (0x00000400U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU9_SHIFT (9U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU9_CLRMSK (0xFFFFFDFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU9_EN (0x00000200U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU8_SHIFT (8U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU8_CLRMSK (0xFFFFFEFFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU8_EN (0x00000100U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU7_SHIFT (7U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU7_CLRMSK (0xFFFFFF7FU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU7_EN (0x00000080U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU6_SHIFT (6U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU6_CLRMSK (0xFFFFFFBFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU6_EN (0x00000040U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU5_SHIFT (5U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU5_CLRMSK (0xFFFFFFDFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU5_EN (0x00000020U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU4_SHIFT (4U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU4_CLRMSK (0xFFFFFFEFU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU4_EN (0x00000010U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU3_SHIFT (3U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU3_CLRMSK (0xFFFFFFF7U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU3_EN (0x00000008U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU2_SHIFT (2U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU2_CLRMSK (0xFFFFFFFBU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU2_EN (0x00000004U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU1_SHIFT (1U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU1_CLRMSK (0xFFFFFFFDU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU1_EN (0x00000002U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU0_SHIFT (0U)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU0_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_TEXAS_SOFT_RESET_SPU0_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_SWIFT_SOFT_RESET
+*/
+#define RGX_CR_SWIFT_SOFT_RESET (0x0650U)
+#define RGX_CR_SWIFT_SOFT_RESET__ALRIF_GT0__MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_SHIFT (62U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT2_EN (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_SHIFT (61U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT1_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_SHIFT (60U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SWIFT0_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_SHIFT (59U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT2_EN (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_SHIFT (58U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT1_EN (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_SHIFT (57U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SWIFT0_EN (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_SHIFT (56U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT2_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_SHIFT (55U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT1_EN (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_SHIFT (54U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SWIFT0_EN (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_SHIFT (53U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT2_EN (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_SHIFT (52U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT1_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_SHIFT (51U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SWIFT0_EN (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_SHIFT (50U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT2_EN (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_SHIFT (49U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT1_EN (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_SHIFT (48U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SWIFT0_EN (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_SHIFT (47U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT2_EN (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_SHIFT (46U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT1_EN (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_SHIFT (45U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SWIFT0_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_SHIFT (44U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT2_EN (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_SHIFT (43U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT1_EN (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_SHIFT (42U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SWIFT0_EN (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_SHIFT (41U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT2_EN (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_SHIFT (40U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT1_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_SHIFT (39U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SWIFT0_EN (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_SHIFT (38U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT2_EN (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_SHIFT (37U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT1_EN (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_SHIFT (36U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SWIFT0_EN (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_SHIFT (35U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT2_EN (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_SHIFT (34U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT1_EN (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_SHIFT (33U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SWIFT0_EN (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_SHIFT (32U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT2_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU31_SHIFT (31U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU31_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU31_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_SHIFT (31U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT1_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU30_SHIFT (30U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU30_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU30_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_SHIFT (30U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SWIFT0_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU29_SHIFT (29U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU29_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU29_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_SHIFT (29U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT2_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU28_SHIFT (28U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU28_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU28_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_SHIFT (28U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT1_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU27_SHIFT (27U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU27_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU27_EN (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_SHIFT (27U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SWIFT0_EN (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU26_SHIFT (26U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU26_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU26_EN (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_SHIFT (26U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT2_EN (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU25_SHIFT (25U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU25_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU25_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_SHIFT (25U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT1_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU24_SHIFT (24U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU24_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU24_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_SHIFT (24U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SWIFT0_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU23_SHIFT (23U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU23_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU23_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_SHIFT (23U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT2_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU22_SHIFT (22U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU22_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU22_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_SHIFT (22U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT1_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU21_SHIFT (21U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU21_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU21_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_SHIFT (21U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SWIFT0_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_SHIFT (20U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU20_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_SHIFT (20U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT2_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_SHIFT (19U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU19_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_SHIFT (19U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT1_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_SHIFT (18U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU18_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_SHIFT (18U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SWIFT0_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_SHIFT (17U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU17_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_SHIFT (17U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT2_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_SHIFT (16U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU16_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_SHIFT (16U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT1_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_SHIFT (15U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU15_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_SHIFT (15U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SWIFT0_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_SHIFT (14U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU14_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_SHIFT (14U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT2_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_SHIFT (13U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU13_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_SHIFT (13U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT1_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_SHIFT (12U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU12_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_SHIFT (12U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SWIFT0_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_SHIFT (11U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU11_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_SHIFT (11U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT2_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_SHIFT (10U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU10_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_SHIFT (10U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT1_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_SHIFT (9U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU9_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_SHIFT (9U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SWIFT0_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_SHIFT (8U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU8_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_SHIFT (8U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT2_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_SHIFT (7U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU7_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_SHIFT (7U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT1_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_SHIFT (6U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU6_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_SHIFT (6U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SWIFT0_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_SHIFT (5U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU5_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_SHIFT (5U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT2_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_SHIFT (4U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU4_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_SHIFT (4U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT1_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_SHIFT (3U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU3_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_SHIFT (3U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SWIFT0_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_SHIFT (2U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU2_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_SHIFT (2U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT2_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_SHIFT (1U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU1_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_SHIFT (1U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT1_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SHIFT (0U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_EN (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_SHIFT (0U)
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_SWIFT_SOFT_RESET_SPU0_SWIFT0_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_RAC_SOFT_RESET (offset 0x0660): per-SPU RAC soft-reset control, one bit per RAC instance (SPUn_RACm), occupying bits 0..62 (MASKFULL 0x7FFFFFFFFFFFFFFF)
+*/
+#define RGX_CR_RAC_SOFT_RESET (0x0660U)
+#define RGX_CR_RAC_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_SHIFT (62U)
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC2_EN (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_SHIFT (61U)
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC1_EN (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_SHIFT (60U)
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU20_RAC0_EN (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_SHIFT (59U)
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC2_EN (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_SHIFT (58U)
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC1_EN (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_SHIFT (57U)
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU19_RAC0_EN (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_SHIFT (56U)
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC2_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_SHIFT (55U)
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC1_EN (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_SHIFT (54U)
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU18_RAC0_EN (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_SHIFT (53U)
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC2_EN (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_SHIFT (52U)
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC1_EN (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_SHIFT (51U)
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU17_RAC0_EN (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_SHIFT (50U)
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC2_EN (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_SHIFT (49U)
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC1_EN (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_SHIFT (48U)
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU16_RAC0_EN (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_SHIFT (47U)
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC2_EN (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_SHIFT (46U)
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC1_EN (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_SHIFT (45U)
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU15_RAC0_EN (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_SHIFT (44U)
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC2_EN (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_SHIFT (43U)
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC1_EN (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_SHIFT (42U)
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU14_RAC0_EN (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_SHIFT (41U)
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC2_EN (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_SHIFT (40U)
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC1_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_SHIFT (39U)
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU13_RAC0_EN (IMG_UINT64_C(0x0000008000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_SHIFT (38U)
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC2_EN (IMG_UINT64_C(0x0000004000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_SHIFT (37U)
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC1_EN (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_SHIFT (36U)
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU12_RAC0_EN (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_SHIFT (35U)
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC2_EN (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_SHIFT (34U)
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC1_EN (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_SHIFT (33U)
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU11_RAC0_EN (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_SHIFT (32U)
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC2_EN (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_SHIFT (31U)
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC1_EN (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_SHIFT (30U)
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU10_RAC0_EN (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_SHIFT (29U)
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC2_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_SHIFT (28U)
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC1_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_SHIFT (27U)
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU9_RAC0_EN (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_SHIFT (26U)
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC2_EN (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_SHIFT (25U)
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC1_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_SHIFT (24U)
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU8_RAC0_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_SHIFT (23U)
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC2_EN (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_SHIFT (22U)
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC1_EN (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_SHIFT (21U)
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU7_RAC0_EN (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_SHIFT (20U)
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC2_EN (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_SHIFT (19U)
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC1_EN (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_SHIFT (18U)
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU6_RAC0_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_SHIFT (17U)
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC2_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_SHIFT (16U)
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC1_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_SHIFT (15U)
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU5_RAC0_EN (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_SHIFT (14U)
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC2_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_SHIFT (13U)
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC1_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_SHIFT (12U)
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU4_RAC0_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_SHIFT (11U)
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC2_EN (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_SHIFT (10U)
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC1_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_SHIFT (9U)
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU3_RAC0_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_SHIFT (8U)
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC2_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_SHIFT (7U)
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC1_EN (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_SHIFT (6U)
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
+#define RGX_CR_RAC_SOFT_RESET_SPU2_RAC0_EN (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_SHIFT (5U)
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC2_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_SHIFT (4U)
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC1_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_SHIFT (3U)
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
+#define RGX_CR_RAC_SOFT_RESET_SPU1_RAC0_EN (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_SHIFT (2U)
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC2_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_SHIFT (1U)
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC1_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_SHIFT (0U)
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_CR_RAC_SOFT_RESET_SPU0_RAC0_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_FWCORE_WDT_RESET
+*/
+#define RGX_CR_FWCORE_WDT_RESET (0x4500U)
+#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U)
+#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_WDT_CTRL
+*/
+#define RGX_CR_FWCORE_WDT_CTRL (0x4508U)
+#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01))
+#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U)
+#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU)
+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U)
+#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_CR_FWCORE_WDT_COUNT
+*/
+#define RGX_CR_FWCORE_WDT_COUNT (0x4510U)
+#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U)
+#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U)
+
+
+#endif /* RGX_CR_DEFS_KM_H */
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title Rogue hw definitions (kernel mode)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXDEFS_KM_H
+#define RGXDEFS_KM_H
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+#endif
+
+#define IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#endif
+#undef IMG_EXPLICIT_INCLUDE_HWDEFS
+
+#include "rgx_heap_firmware.h"
+
+/* The following Macros are picked up through BVNC headers for no hardware
+ * operations to be compatible with old build infrastructure.
+ */
+#if defined(NO_HARDWARE)
+/******************************************************************************
+ * Check for valid B.X.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define RGX_BVNC_KM_ST2(S) #S
+#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S)
+#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V)
+
+/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */
+#define RGX_BVNC_STR_SIZE_MAX (2+1+4+1+4+1+4+1)
+#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u"
+#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u"
+
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1)
+#define C_POSITION (0)
+#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) /* NOTE(review): "POSTION" (sic) is a historical typo in these four macro names; renaming would break external users — confirm before changing */
+#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+/* Extract one B/V/N/C field from a 64-bit value packed with BVNC_PACK() */
+#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION))
+#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION))
+#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION))
+#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION))
+
+/* Pack B.V.N.C fields into one 64-bit value; arguments parenthesized so
+ * expression arguments (e.g. BVNC_PACK(b + 1, ...)) are cast as a whole. */
+#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)(B))) << (B_POSITION) | \
+ (((IMG_UINT64)(V))) << (V_POSITION) | \
+ (((IMG_UINT64)(N))) << (N_POSITION) | \
+ (((IMG_UINT64)(C))) << (C_POSITION) \
+ )
+
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U)
+
+/* The default number of OSID is 1, higher number implies VZ enabled firmware */
+#if !defined(RGXFW_NATIVE) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED + 1U > 1U)
+#define RGXFW_NUM_OS RGX_NUM_DRIVERS_SUPPORTED
+#else
+#define RGXFW_NUM_OS 1U
+#endif
+
+#if defined(RGX_FEATURE_NUM_OSIDS)
+#define RGXFW_MAX_NUM_OSIDS (RGX_FEATURE_NUM_OSIDS)
+#else
+#define RGXFW_MAX_NUM_OSIDS (8U)
+#endif
+
+#define RGXFW_HOST_DRIVER_ID (0U)
+#define RGXFW_GUEST_DRIVER_ID_START (RGXFW_HOST_DRIVER_ID + 1U)
+
+#define RGXFW_THREAD_0 (0U)
+#define RGXFW_THREAD_1 (1U)
+
+/* META cores (required for the RGX_FEATURE_META) */
+#define MTP218 (1)
+#define MTP219 (2)
+#define LTP218 (3)
+#define LTP217 (4)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K (32*1024)
+#define RGX_META_COREMEM_48K (48*1024)
+#define RGX_META_COREMEM_64K (64*1024)
+#define RGX_META_COREMEM_96K (96*1024)
+#define RGX_META_COREMEM_128K (128*1024)
+#define RGX_META_COREMEM_256K (256*1024)
+
+#if !defined(__KERNEL__)
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(RGX_FEATURE_META_DMA)
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE (0)
+#elif defined(RGX_FEATURE_META_COREMEM_SIZE)
+#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U)
+#else
+#define RGX_META_COREMEM_SIZE (0)
+#endif
+
+#if RGX_META_COREMEM_SIZE != 0
+#define RGX_META_COREMEM
+#define RGX_META_COREMEM_CODE
+#define RGX_META_COREMEM_DATA
+#endif
+#endif
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_INT32)(x)) > 0) ? ((x)/8) : (0)) /* bits -> bytes; argument parenthesized so the cast covers expression arguments */
+
+#if defined(RGX_FEATURE_META_DMA)
+#define RGX_META_DMA_BLOCK_SIZE (32U)
+#else
+#define RGX_META_DMA_BLOCK_SIZE (0U)
+#endif
+
+#if defined(SUPPORT_AGP)
+#if defined(SUPPORT_AGP4)
+#define MAX_HW_TA3DCONTEXTS 5U
+#else
+#define MAX_HW_TA3DCONTEXTS 3U
+#endif
+#else
+#define MAX_HW_TA3DCONTEXTS 2U
+#endif
+
+#define RGX_CR_CLK_CTRL0_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL0_MASKFULL)
+#define RGX_CR_CLK_CTRL0_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL0_MASKFULL)
+#define RGX_CR_CLK_CTRL1_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL1_MASKFULL)
+#define RGX_CR_CLK_CTRL1_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL1_MASKFULL)
+#define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL)
+#define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL)
+
+#define RGX_CR_MERCER0_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER0_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER0_EN)
+
+#define RGX_CR_MERCER1_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER1_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER1_EN)
+
+#define RGX_CR_MERCER2_SOFT_RESET_SPU_EN (RGX_CR_MERCER_SOFT_RESET_SPU0_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU1_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU2_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU3_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU4_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU5_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU6_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU7_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU8_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU9_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU10_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU11_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU12_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU13_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU14_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU15_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU16_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU17_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU18_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU19_MERCER2_EN | \
+ RGX_CR_MERCER_SOFT_RESET_SPU20_MERCER2_EN)
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \
+ RGX_CR_SOFT_RESET_ISP_EN)
+#define RGX_SOFT_RESET_JONES_ALL (RGX_SOFT_RESET_JONES | \
+ RGX_CR_SOFT_RESET_BIF_TEXAS_EN | \
+ RGX_CR_SOFT_RESET_BIF_JONES_EN | \
+ RGX_CR_SOFT_RESET_SLC_EN | \
+ RGX_CR_SOFT_RESET_GARTEN_EN)
+#define RGX_SOFT_RESET_EXTRA (RGX_CR_SOFT_RESET_PIXEL_EN | \
+ RGX_CR_SOFT_RESET_VERTEX_EN | \
+ RGX_CR_SOFT_RESET_GEO_VERTEX_EN | \
+ RGX_CR_SOFT_RESET_GEO_SHARED_EN | \
+ RGX_CR_SOFT_RESET_COMPUTE_EN | \
+ RGX_CR_SOFT_RESET_TDM_EN)
+#define RGX_SOFT_RESET_FROM_WITHIN_CORE (RGX_CR_SOFT_RESET_MASKFULL ^ \
+ (RGX_CR_SOFT_RESET_GARTEN_EN | \
+ RGX_CR_SOFT_RESET_BIF_JONES_EN | \
+ RGX_CR_SOFT_RESET_SLC_EN))
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1U << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1U << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (32U)
+
+/* To get the number of required Bernado/Phantom(s), divide
+ * the number of clusters by 4 and round up
+ */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3U) / 4U)
+
+#if !defined(__KERNEL__)
+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+/* for nohw multicore, true max number of cores returned to client */
+#define RGX_MULTICORE_MAX_NOHW_CORES (8U)
+
+/*
+ * META second thread feature depending on META variants and
+ * available CoreMem
+ */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && (RGX_FEATURE_META_COREMEM_SIZE >= 96)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+/*
+ * FWCORE wrapper register defines
+ */
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT
+#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK
+#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U)
+
+
+/*
+ * FBC clear color register defaults based on HW defaults
+ * non-YUV clear colour 0: 0x00000000 (encoded as ch3,2,1,0)
+ * non-YUV clear colour 1: 0x01000000 (encoded as ch3,2,1,0)
+ * YUV clear colour 0: 0x000 000 (encoded as UV Y)
+ * YUV clear colour 1: 0x000 3FF (encoded as UV Y)
+ */
+#define RGX_FBC_CC_DEFAULT (IMG_UINT64_C(0x0100000000000000)) /* wrapped in IMG_UINT64_C for consistency with the rest of this file and 32-bit toolchain portability */
+#define RGX_FBC_CC_YUV_DEFAULT (IMG_UINT64_C(0x000003FF00000000))
+
+/*
+ * Virtualisation definitions
+ */
+
+#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE)
+
+/*
+ * Renaming MTS sideband bitfields to emphasize that the Register Bank number
+ * of the MTS register used identifies a specific Driver/VM rather than the OSID tag
+ * emitted on bus memory transactions.
+ */
+#define RGX_MTS_SBDATA_DRIVERID_CLRMSK RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_CLRMSK
+#define RGX_MTS_SBDATA_DRIVERID_SHIFT RGX_CR_MTS_BGCTX_SBDATA0_OS_ID_SHIFT
+
+/* Register Bank containing registers secured against host access */
+#define RGX_HOST_SECURE_REGBANK_OFFSET (0xF0000U)
+#define RGX_HOST_SECURE_REGBANK_SIZE (0x10000U)
+
+/* GPU CR timer tick in GPU cycles */
+#define RGX_CRTIME_TICK_IN_CYCLES (256U)
+
+#if defined(FIX_HW_BRN_71840)
+#define ROGUE_RENDERSIZE_MAXX (16384U)
+#define ROGUE_RENDERSIZE_MAXY (16384U)
+#else
+#define ROGUE_RENDERSIZE_MAXX (RGX_FEATURE_RENDER_TARGET_XY_MAX)
+#define ROGUE_RENDERSIZE_MAXY (RGX_FEATURE_RENDER_TARGET_XY_MAX)
+#endif
+
+/*
+ * Register used by the FW to track the current boot stage (not used in MIPS)
+ */
+#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_SCRATCH14)
+
+/*
+ * Define used to determine whether or not SLC range-based flush/invalidate
+ * interface is supported.
+ */
+#define RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED 1
+
+/*
+ * Macro used to indicate which version of HWPerf is active
+ */
+#define RGX_FEATURE_HWPERF_VOLCANIC
+
+/*
+ * Maximum number of cores supported by TRP
+ */
+#define RGX_TRP_MAX_NUM_CORES (8U)
+
+/*
+ * Maximum number of cores supported by WGP
+ */
+#define RGX_WGP_MAX_NUM_CORES (8U)
+
+/*
+ * Supports command to invalidate FBCDC descriptor state cache
+ */
+#define RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED 1
+
+#if defined(FIX_HW_BRN_71422)
+/*
+ * The BRN71422 software workaround requires a target physical address on
+ * the hardware platform with a low latency response time and which will
+ * not suffer from delays of DRAM hardware operations such as refresh and
+ * recalibration. Only with that address defined will the workaround be used.
+ */
+#if !defined(PDUMP)
+//#define RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR (IMG_UINT64_C(0x0000000000))
+#endif
+#define RGX_BRN71422_WORKAROUND_READ_SIZE (32U)
+#endif
+
+#endif /* RGXDEFS_KM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxmmudefs_km.h
+@Brief The file contains auto-generated hardware definitions without
+ BVNC-specific compile time conditionals.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ * rogue_bif.def
+ */
+
+
+#ifndef RGXMMUDEFS_KM_H
+#define RGXMMUDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXMMUDEFS_KM_REVISION 0
+
+#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_NUM_ENTRIES_VALUE (0x00000010U)
+
+
+#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_SHIFT_VALUE (0x00000004U)
+
+
+#define OSID_CTXT_MAPPING_REGISTERS_CONSTANTS_OSID_CTXT_MAPPING_PER_OS_MASK_VALUE (0x00000007U)
+
+
+#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF))
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF))
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF))
+
+
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+
+
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+
+
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+
+
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
+
+
+#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF))
+
+
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF))
+
+
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF))
+
+
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF))
+
+
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF))
+
+
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF))
+
+
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF))
+
+
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F))
+
+
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F))
+
+
+#define RGX_MMUCTRL_AXCACHE_MASK (0x0000000FU)
+/*
+Device Non-bufferable */
+#define RGX_MMUCTRL_AXCACHE_DEVNONBUFF (0x00000000U)
+/*
+Device Bufferable */
+#define RGX_MMUCTRL_AXCACHE_DEVBUFF (0x00000001U)
+/*
+Normal Non-cacheable Non-bufferable */
+#define RGX_MMUCTRL_AXCACHE_NORMNONBUFF (0x00000002U)
+/*
+Normal Non-cacheable Bufferable */
+#define RGX_MMUCTRL_AXCACHE_NORMBUFF (0x00000003U)
+/*
+Write-through No-allocate*/
+#define RGX_MMUCTRL_AXCACHE_WTNOALLOC (0x00000006U)
+/*
+Write-back No-allocate*/
+#define RGX_MMUCTRL_AXCACHE_WBNOALLOC (0x00000007U)
+/*
+Write-through Read-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WTRALLOC (0x00000008U)
+/*
+Write-back Read-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WBRALLOC (0x00000009U)
+/*
+Write-through Write-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WTWALLOC (0x0000000aU)
+/*
+Write-back Write-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WBWALLOC (0x0000000bU)
+/*
+Write-through Read/Write-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WTRWALLOC (0x0000000eU)
+/*
+Write-back Read/Write-Allocate */
+#define RGX_MMUCTRL_AXCACHE_WBRWALLOC (0x0000000fU)
+
+
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_SHIFT (58U)
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_CLRMSK (IMG_UINT64_C(0xC3FFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVNONBUFF (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_DEVBUFF (IMG_UINT64_C(0x0400000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMNONBUFF (IMG_UINT64_C(0x0800000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_NORMBUFF (IMG_UINT64_C(0x0c00000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTNOALLOC (IMG_UINT64_C(0x1800000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBNOALLOC (IMG_UINT64_C(0x1c00000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRALLOC (IMG_UINT64_C(0x2000000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRALLOC (IMG_UINT64_C(0x2400000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTWALLOC (IMG_UINT64_C(0x2800000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBWALLOC (IMG_UINT64_C(0x2c00000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WTRWALLOC (IMG_UINT64_C(0x3800000000000000))
+#define RGX_MMUCTRL_PT_DATA_AXCACHE_WBRWALLOC (IMG_UINT64_C(0x3c00000000000000))
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xFC0000FFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000))
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a))
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U)
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0xFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U)
+
+
+#endif /* RGXMMUDEFS_KM_H */
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxtbdefs_km.h
+@Brief The file contains auto-generated hardware definitions without
+ BVNC-specific compile time conditionals.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ */
+
+
+#ifndef RGXTBDEFS_KM_H
+#define RGXTBDEFS_KM_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXTBDEFS_KM_REVISION 1
+
+/*
+ Register RGX_TB_SOFT_RESET
+*/
+#define RGX_TB_SOFT_RESET (0x0000U)
+#define RGX_TB_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00000000FFFF0107))
+#define RGX_TB_SOFT_RESET_SPU_SHIFT (16U)
+#define RGX_TB_SOFT_RESET_SPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
+#define RGX_TB_SOFT_RESET_JONES_SHIFT (8U)
+#define RGX_TB_SOFT_RESET_JONES_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_TB_SOFT_RESET_JONES_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_TB_SOFT_RESET_SYS_SHIFT (2U)
+#define RGX_TB_SOFT_RESET_SYS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_TB_SOFT_RESET_SYS_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_TB_SOFT_RESET_MEM_SHIFT (1U)
+#define RGX_TB_SOFT_RESET_MEM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_TB_SOFT_RESET_MEM_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_TB_SOFT_RESET_CORE_SHIFT (0U)
+#define RGX_TB_SOFT_RESET_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_SOFT_RESET_CORE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_PCI_MASTER
+*/
+#define RGX_TB_PCI_MASTER (0x0008U)
+#define RGX_TB_PCI_MASTER_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_TB_PCI_MASTER_MODE_SHIFT (0U)
+#define RGX_TB_PCI_MASTER_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_PCI_MASTER_MODE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_MEM_ARBITER
+*/
+#define RGX_TB_MEM_ARBITER (0x0088U)
+#define RGX_TB_MEM_ARBITER_MASKFULL (IMG_UINT64_C(0x0000000000010F11))
+#define RGX_TB_MEM_ARBITER_LIMIT_BW_SHIFT (16U)
+#define RGX_TB_MEM_ARBITER_LIMIT_BW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_TB_MEM_ARBITER_LIMIT_BW_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_TB_MEM_ARBITER_PRI_SKEW_SHIFT (8U)
+#define RGX_TB_MEM_ARBITER_PRI_SKEW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF0FF))
+#define RGX_TB_MEM_ARBITER_PRI_RNW_SHIFT (4U)
+#define RGX_TB_MEM_ARBITER_PRI_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_TB_MEM_ARBITER_PRI_RNW_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_TB_MEM_ARBITER_ENABLE_SHIFT (0U)
+#define RGX_TB_MEM_ARBITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_MEM_ARBITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_QOS_RD_LATENCY
+*/
+#define RGX_TB_QOS_RD_LATENCY (0x0090U)
+#define RGX_TB_QOS_RD_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF))
+#define RGX_TB_QOS_RD_LATENCY_DIST_SHIFT (62U)
+#define RGX_TB_QOS_RD_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_TB_QOS_RD_LATENCY_MAX_15_SHIFT (48U)
+#define RGX_TB_QOS_RD_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_TB_QOS_RD_LATENCY_MIN_15_SHIFT (32U)
+#define RGX_TB_QOS_RD_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_TB_QOS_RD_LATENCY_MAX_0_SHIFT (16U)
+#define RGX_TB_QOS_RD_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF))
+#define RGX_TB_QOS_RD_LATENCY_MIN_0_SHIFT (0U)
+#define RGX_TB_QOS_RD_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000))
+
+
+/*
+ Register RGX_TB_QOS_WR_LATENCY
+*/
+#define RGX_TB_QOS_WR_LATENCY (0x0098U)
+#define RGX_TB_QOS_WR_LATENCY_MASKFULL (IMG_UINT64_C(0xFFFF3FFF3FFF3FFF))
+#define RGX_TB_QOS_WR_LATENCY_DIST_SHIFT (62U)
+#define RGX_TB_QOS_WR_LATENCY_DIST_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF))
+#define RGX_TB_QOS_WR_LATENCY_MAX_15_SHIFT (48U)
+#define RGX_TB_QOS_WR_LATENCY_MAX_15_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
+#define RGX_TB_QOS_WR_LATENCY_MIN_15_SHIFT (32U)
+#define RGX_TB_QOS_WR_LATENCY_MIN_15_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
+#define RGX_TB_QOS_WR_LATENCY_MAX_0_SHIFT (16U)
+#define RGX_TB_QOS_WR_LATENCY_MAX_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC000FFFF))
+#define RGX_TB_QOS_WR_LATENCY_MIN_0_SHIFT (0U)
+#define RGX_TB_QOS_WR_LATENCY_MIN_0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFC000))
+
+
+/*
+ Register RGX_TB_MAX_ID_OUTSTANDING
+*/
+#define RGX_TB_MAX_ID_OUTSTANDING (0x00B0U)
+#define RGX_TB_MAX_ID_OUTSTANDING_MASKFULL (IMG_UINT64_C(0x000003FF03FF03FF))
+#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_SHIFT (32U)
+#define RGX_TB_MAX_ID_OUTSTANDING_RD_WR_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF))
+#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_SHIFT (16U)
+#define RGX_TB_MAX_ID_OUTSTANDING_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF))
+#define RGX_TB_MAX_ID_OUTSTANDING_READ_SHIFT (0U)
+#define RGX_TB_MAX_ID_OUTSTANDING_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFC00))
+
+
+/*
+ Register RGX_TB_COHERENT_MEM_REGION
+*/
+#define RGX_TB_COHERENT_MEM_REGION (0x00C0U)
+#define RGX_TB_COHERENT_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_SHIFT (12U)
+#define RGX_TB_COHERENT_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+/*
+ Register RGX_TB_LMA_MEM_REGION
+*/
+#define RGX_TB_LMA_MEM_REGION (0x00C8U)
+#define RGX_TB_LMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_LMA_MEM_REGION_SIZE_SHIFT (0U)
+#define RGX_TB_LMA_MEM_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_UMA_MEM_REGION
+*/
+#define RGX_TB_UMA_MEM_REGION (0x00D0U)
+#define RGX_TB_UMA_MEM_REGION_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_TB_UMA_MEM_REGION_START_ADDR_SHIFT (12U)
+#define RGX_TB_UMA_MEM_REGION_START_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
+
+
+/*
+ Register RGX_TB_SYSTEM_STATUS
+*/
+#define RGX_TB_SYSTEM_STATUS (0x00E0U)
+#define RGX_TB_SYSTEM_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFF33F700FF))
+#define RGX_TB_SYSTEM_STATUS_SPU_ISON_SHIFT (48U)
+#define RGX_TB_SYSTEM_STATUS_SPU_ISON_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_SPU_POWER_SHIFT (32U)
+#define RGX_TB_SYSTEM_STATUS_SPU_POWER_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_JONES_ISON_SHIFT (29U)
+#define RGX_TB_SYSTEM_STATUS_JONES_ISON_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_JONES_ISON_EN (IMG_UINT64_C(0x0000000020000000))
+#define RGX_TB_SYSTEM_STATUS_JONES_POWER_SHIFT (28U)
+#define RGX_TB_SYSTEM_STATUS_JONES_POWER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_JONES_POWER_EN (IMG_UINT64_C(0x0000000010000000))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_SHIFT (25U)
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_ABORT_EN (IMG_UINT64_C(0x0000000002000000))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_SHIFT (24U)
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
+#define RGX_TB_SYSTEM_STATUS_HOST_POWER_EVENT_COMPLETE_EN (IMG_UINT64_C(0x0000000001000000))
+#define RGX_TB_SYSTEM_STATUS_GPU_STATE_SHIFT (20U)
+#define RGX_TB_SYSTEM_STATUS_GPU_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF0FFFFF))
+#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_SHIFT (18U)
+#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
+#define RGX_TB_SYSTEM_STATUS_SYSTEM_IRQ_EN (IMG_UINT64_C(0x0000000000040000))
+#define RGX_TB_SYSTEM_STATUS_TRIGGER_SHIFT (17U)
+#define RGX_TB_SYSTEM_STATUS_TRIGGER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
+#define RGX_TB_SYSTEM_STATUS_TRIGGER_EN (IMG_UINT64_C(0x0000000000020000))
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_SHIFT (16U)
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
+#define RGX_TB_SYSTEM_STATUS_HMMU_IRQ_EN (IMG_UINT64_C(0x0000000000010000))
+#define RGX_TB_SYSTEM_STATUS_IRQ_SHIFT (0U)
+#define RGX_TB_SYSTEM_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_TB_SYSTEM_CONFIG
+*/
+#define RGX_TB_SYSTEM_CONFIG (0x00F0U)
+#define RGX_TB_SYSTEM_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000007007737))
+#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_SHIFT (24U)
+#define RGX_TB_SYSTEM_CONFIG_SOC_AXI_FEATURE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF))
+#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_SHIFT (14U)
+#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
+#define RGX_TB_SYSTEM_CONFIG_ICE_CABLEIF256_EN (IMG_UINT64_C(0x0000000000004000))
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_SHIFT (13U)
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_TBX_EN (IMG_UINT64_C(0x0000000000002000))
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_SHIFT (12U)
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_TB_SYSTEM_CONFIG_VELOCE_ICE_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_SHIFT (10U)
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_AVIP_EN (IMG_UINT64_C(0x0000000000000400))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_SHIFT (9U)
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_TBA_EN (IMG_UINT64_C(0x0000000000000200))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_SHIFT (8U)
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
+#define RGX_TB_SYSTEM_CONFIG_CADENCE_ICE_EN (IMG_UINT64_C(0x0000000000000100))
+#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_SHIFT (5U)
+#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
+#define RGX_TB_SYSTEM_CONFIG_EMU_UMA_EN (IMG_UINT64_C(0x0000000000000020))
+#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_SHIFT (4U)
+#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
+#define RGX_TB_SYSTEM_CONFIG_EMU_BUILD_EN (IMG_UINT64_C(0x0000000000000010))
+#define RGX_TB_SYSTEM_CONFIG_NS_NOC_SHIFT (2U)
+#define RGX_TB_SYSTEM_CONFIG_NS_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
+#define RGX_TB_SYSTEM_CONFIG_NS_NOC_EN (IMG_UINT64_C(0x0000000000000004))
+#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_SHIFT (1U)
+#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
+#define RGX_TB_SYSTEM_CONFIG_IMG_NOC_EN (IMG_UINT64_C(0x0000000000000002))
+#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_SHIFT (0U)
+#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_SYSTEM_CONFIG_TB_NONCOHERENT_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_GPU_CONTROL
+*/
+#define RGX_TB_GPU_CONTROL (0x0110U)
+#define RGX_TB_GPU_CONTROL_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_SHIFT (2U)
+#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_CLRMSK (0xFFFFFFFBU)
+#define RGX_TB_GPU_CONTROL_FW_LOG_DISABLE_EN (0x00000004U)
+#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_SHIFT (1U)
+#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_TB_GPU_CONTROL_DXT_BC_ENABLE_EN (0x00000002U)
+#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_SHIFT (0U)
+#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_GPU_CONTROL_ASTC_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_RDATA_CORRUPT_ENABLE
+*/
+#define RGX_TB_RDATA_CORRUPT_ENABLE (0x1560U)
+#define RGX_TB_RDATA_CORRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_SHIFT (0U)
+#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_RDATA_CORRUPT_ENABLE_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_RDATA_CORRUPT_MASK
+*/
+#define RGX_TB_RDATA_CORRUPT_MASK (0x1568U)
+#define RGX_TB_RDATA_CORRUPT_MASK_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_TB_RDATA_CORRUPT_MASK_MMU_SHIFT (31U)
+#define RGX_TB_RDATA_CORRUPT_MASK_MMU_CLRMSK (0x7FFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_MMU_EN (0x80000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_UPS_SHIFT (30U)
+#define RGX_TB_RDATA_CORRUPT_MASK_UPS_CLRMSK (0xBFFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_UPS_EN (0x40000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBM_SHIFT (29U)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBM_CLRMSK (0xDFFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBM_EN (0x20000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TUL_SHIFT (28U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TUL_CLRMSK (0xEFFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_TUL_EN (0x10000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHR_SHIFT (27U)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHR_CLRMSK (0xF7FFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHR_EN (0x08000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBA_SHIFT (26U)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBA_CLRMSK (0xFBFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_FBA_EN (0x04000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VDM_SHIFT (25U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VDM_CLRMSK (0xFDFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_VDM_EN (0x02000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_SHIFT (24U)
+#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_CLRMSK (0xFEFFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_USC_L2_EN (0x01000000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDS_SHIFT (23U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDS_CLRMSK (0xFF7FFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDS_EN (0x00800000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_SHIFT (22U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_CLRMSK (0xFFBFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_PDSRW_EN (0x00400000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TPF_SHIFT (21U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TPF_CLRMSK (0xFFDFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_TPF_EN (0x00200000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHF_SHIFT (20U)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHF_CLRMSK (0xFFEFFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_SHF_EN (0x00100000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_AMC_SHIFT (19U)
+#define RGX_TB_RDATA_CORRUPT_MASK_AMC_CLRMSK (0xFFF7FFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_AMC_EN (0x00080000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RAC_SHIFT (18U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RAC_CLRMSK (0xFFFBFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_RAC_EN (0x00040000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_SHIFT (17U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_CLRMSK (0xFFFDFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_RTC_EN (0x00020000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_ISP_SHIFT (16U)
+#define RGX_TB_RDATA_CORRUPT_MASK_ISP_CLRMSK (0xFFFEFFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_ISP_EN (0x00010000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PPP_SHIFT (15U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PPP_CLRMSK (0xFFFF7FFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_PPP_EN (0x00008000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPF_SHIFT (14U)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPF_CLRMSK (0xFFFFBFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPF_EN (0x00004000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_SHIFT (13U)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_CLRMSK (0xFFFFDFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_VCE_EN (0x00002000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PBE_SHIFT (12U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PBE_CLRMSK (0xFFFFEFFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_PBE_EN (0x00001000U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TCU_SHIFT (11U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TCU_CLRMSK (0xFFFFF7FFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_TCU_EN (0x00000800U)
+#define RGX_TB_RDATA_CORRUPT_MASK_MCU_SHIFT (10U)
+#define RGX_TB_RDATA_CORRUPT_MASK_MCU_CLRMSK (0xFFFFFBFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_MCU_EN (0x00000400U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RPM_SHIFT (9U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RPM_CLRMSK (0xFFFFFDFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_RPM_EN (0x00000200U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RTU_SHIFT (8U)
+#define RGX_TB_RDATA_CORRUPT_MASK_RTU_CLRMSK (0xFFFFFEFFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_RTU_EN (0x00000100U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TILING_SHIFT (7U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TILING_CLRMSK (0xFFFFFF7FU)
+#define RGX_TB_RDATA_CORRUPT_MASK_TILING_EN (0x00000080U)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_SHIFT (6U)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_CLRMSK (0xFFFFFFBFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_DMA_EN (0x00000040U)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_SHIFT (5U)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_CLRMSK (0xFFFFFFDFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_META_EN (0x00000020U)
+#define RGX_TB_RDATA_CORRUPT_MASK_CDM_SHIFT (4U)
+#define RGX_TB_RDATA_CORRUPT_MASK_CDM_CLRMSK (0xFFFFFFEFU)
+#define RGX_TB_RDATA_CORRUPT_MASK_CDM_EN (0x00000010U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PM_SHIFT (3U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PM_CLRMSK (0xFFFFFFF7U)
+#define RGX_TB_RDATA_CORRUPT_MASK_PM_EN (0x00000008U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TDM_SHIFT (2U)
+#define RGX_TB_RDATA_CORRUPT_MASK_TDM_CLRMSK (0xFFFFFFFBU)
+#define RGX_TB_RDATA_CORRUPT_MASK_TDM_EN (0x00000004U)
+#define RGX_TB_RDATA_CORRUPT_MASK_DCE_SHIFT (1U)
+#define RGX_TB_RDATA_CORRUPT_MASK_DCE_CLRMSK (0xFFFFFFFDU)
+#define RGX_TB_RDATA_CORRUPT_MASK_DCE_EN (0x00000002U)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPP_SHIFT (0U)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPP_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_RDATA_CORRUPT_MASK_IPP_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_RDATA_CORRUPT_FREQ
+*/
+#define RGX_TB_RDATA_CORRUPT_FREQ (0x1570U)
+#define RGX_TB_RDATA_CORRUPT_FREQ_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_SHIFT (0U)
+#define RGX_TB_RDATA_CORRUPT_FREQ_FREQ_CLRMSK (0x00000000U)
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE
+*/
+#define RGX_TB_TRUSTED_DEVICE (0x2000U)
+#define RGX_TB_TRUSTED_DEVICE_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_SHIFT (4U)
+#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_CLRMSK (0xFFFFFFEFU)
+#define RGX_TB_TRUSTED_DEVICE_ALLOW_SECURE_READS_EN (0x00000010U)
+#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_SHIFT (2U)
+#define RGX_TB_TRUSTED_DEVICE_HWCONFIG_CLRMSK (0xFFFFFFF3U)
+#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_SHIFT (1U)
+#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_CLRMSK (0xFFFFFFFDU)
+#define RGX_TB_TRUSTED_DEVICE_OSID_DISABLE_EN (0x00000002U)
+#define RGX_TB_TRUSTED_DEVICE_ENABLE_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_TRUSTED_DEVICE_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_FAULT_STATUS
+*/
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS (0x2008U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0x01FFFFFFFFFF1771))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_SHIFT (56U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_AXPROT_EN (IMG_UINT64_C(0x0100000000000000))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_SHIFT (16U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_ADDR_CLRMSK (IMG_UINT64_C(0xFF0000000000FFFF))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_SHIFT (12U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000001000))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_SHIFT (8U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF8FF))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_SHIFT (4U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_BANK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF8F))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_STATUS_VALID_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR
+*/
+#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR (0x2010U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_TRUSTED_DEVICE_FAULT_CLEAR_PULSE_EN (0x00000001U)
+
+
+/*
+ Register group: RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS, with 8 repeats
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS_REPEATCOUNT (8U)
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0 (0x2018U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1 (0x2020U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2 (0x2028U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3 (0x2030U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4 (0x2038U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5 (0x2040U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6 (0x2048U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7 (0x2050U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register group: RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS, with 8 repeats
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS_REPEATCOUNT (8U)
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0 (0x2058U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1 (0x2060U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2 (0x2068U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3 (0x2070U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4 (0x2078U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5 (0x2080U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6 (0x2088U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7 (0x2090U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION0_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register group: RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS, with 8 repeats
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS_REPEATCOUNT (8U)
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0 (0x2098U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1 (0x20A0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2 (0x20A8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3 (0x20B0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4 (0x20B8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5 (0x20C0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6 (0x20C8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7 (0x20D0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MIN_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register group: RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS, with 8 repeats
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS_REPEATCOUNT (8U)
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0 (0x20D8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1 (0x20E0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2 (0x20E8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3 (0x20F0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4 (0x20F8U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5 (0x2100U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6 (0x2108U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7
+*/
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7 (0x2110U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFFF))
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_REGION1_MAX_OS7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000000))
+
+
+/*
+ Register RGX_TB_BW_LIMITER
+*/
+#define RGX_TB_BW_LIMITER (0x2118U)
+#define RGX_TB_BW_LIMITER_MASKFULL (IMG_UINT64_C(0x00000000007707FF))
+#define RGX_TB_BW_LIMITER_DROPN_EXT_SHIFT (20U)
+#define RGX_TB_BW_LIMITER_DROPN_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF8FFFFF))
+#define RGX_TB_BW_LIMITER_PERIOD_EXT_SHIFT (16U)
+#define RGX_TB_BW_LIMITER_PERIOD_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF))
+#define RGX_TB_BW_LIMITER_DROPN_SHIFT (6U)
+#define RGX_TB_BW_LIMITER_DROPN_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF83F))
+#define RGX_TB_BW_LIMITER_PERIOD_SHIFT (1U)
+#define RGX_TB_BW_LIMITER_PERIOD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1))
+#define RGX_TB_BW_LIMITER_ENABLE_SHIFT (0U)
+#define RGX_TB_BW_LIMITER_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_BW_LIMITER_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_TRUSTED_DEVICE_ACECONFIG
+*/
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG (0x2120U)
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_SHIFT (1U)
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE01))
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_SHIFT (0U)
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_TRUSTED_DEVICE_ACECONFIG_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_DRAM_CROSSBAR
+*/
+#define RGX_TB_DRAM_CROSSBAR (0x2128U)
+#define RGX_TB_DRAM_CROSSBAR_MASKFULL (IMG_UINT64_C(0x0000000000003301))
+#define RGX_TB_DRAM_CROSSBAR_CHANNELS_SHIFT (12U)
+#define RGX_TB_DRAM_CROSSBAR_CHANNELS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF))
+#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_SHIFT (8U)
+#define RGX_TB_DRAM_CROSSBAR_SEL_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
+#define RGX_TB_DRAM_CROSSBAR_ENABLE_SHIFT (0U)
+#define RGX_TB_DRAM_CROSSBAR_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
+#define RGX_TB_DRAM_CROSSBAR_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_TB_SOC_TIMER
+*/
+#define RGX_TB_SOC_TIMER (0x2140U)
+#define RGX_TB_SOC_TIMER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_TB_SOC_TIMER_COUNT_SHIFT (0U)
+#define RGX_TB_SOC_TIMER_COUNT_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_TB_PROGRAMABLE_CLK_DIV
+*/
+#define RGX_TB_PROGRAMABLE_CLK_DIV (0x2150U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_MASKFULL (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_SHIFT (11U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_CLRMSK (0xFFFFF7FFU)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_MODE_EN (0x00000800U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_SHIFT (9U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_PROFILE_SEL_CLRMSK (0xFFFFF9FFU)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_SHIFT (5U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_DIV_CLRMSK (0xFFFFFE1FU)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_SHIFT (1U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_DELAY_CLRMSK (0xFFFFFFE1U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_SHIFT (0U)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_PROGRAMABLE_CLK_DIV_EVENT_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_GPIO_FREQ_CTRL
+*/
+#define RGX_TB_GPIO_FREQ_CTRL (0x2160U)
+#define RGX_TB_GPIO_FREQ_CTRL_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_TB_GPIO_FREQ_CTRL_COUNT_SHIFT (1U)
+#define RGX_TB_GPIO_FREQ_CTRL_COUNT_CLRMSK (0xFFFFFFE1U)
+#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_SHIFT (0U)
+#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
+#define RGX_TB_GPIO_FREQ_CTRL_ENABLE_EN (0x00000001U)
+
+
+/*
+ Register RGX_TB_GPIO_MODE
+*/
+#define RGX_TB_GPIO_MODE (0x2170U)
+#define RGX_TB_GPIO_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_TB_GPIO_MODE_PROTOCOL_SHIFT (0U)
+#define RGX_TB_GPIO_MODE_PROTOCOL_CLRMSK (0xFFFFFFFCU)
+
+
+#endif /* RGXTBDEFS_KM_H */
+/*****************************************************************************
+ End of file (rgxtbdefs_km.h)
+*****************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxpmdefs.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ * rogue_pm.def: #13
+ */
+
+
+#ifndef RGXPMDEFS_H
+#define RGXPMDEFS_H
+
+#include "img_types.h"
+#include "img_defs.h"
+
+
+#define RGXPMDEFS_REVISION 13
+
+/*
+The minimum PB size on a per-RT basis
+*/
+typedef struct PM_DATA_MINI_PB_TAG {
+ IMG_UINT32 u32_0;
+} PM_DATA_MINI_PB;
+
+/*
+
+*/
+#define PM_DATA_MINI_PB_SIZE_WOFF (0U)
+#define PM_DATA_MINI_PB_SIZE_SHIFT (0U)
+#define PM_DATA_MINI_PB_SIZE_CLRMSK (0xFFFFFC00U)
+#define PM_DATA_MINI_PB_SET_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MINI_PB_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_MINI_PB_SIZE_SHIFT))))
+#define PM_DATA_MINI_PB_GET_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_MINI_PB_SIZE_SHIFT)) & 0x000003ffU)
+
+
+/*
+The minimum PB size for the WDDM driver only. It is consistent with OPENGL/OPENGLES. However, it is broken down into two parts: the pageable memory and the non-pageable memory.
+*/
+typedef struct PM_DATA_WDDM_MINI_PB_TAG {
+ IMG_UINT32 u32_0;
+} PM_DATA_WDDM_MINI_PB;
+
+/*
+
+*/
+#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_WOFF (0U)
+#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT (10U)
+#define PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK (0xFFF003FFU)
+#define PM_DATA_WDDM_MINI_PB_SET_NON_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT))))
+#define PM_DATA_WDDM_MINI_PB_GET_NON_PAGABLE_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_WDDM_MINI_PB_NON_PAGABLE_SIZE_SHIFT)) & 0x000003ffU)
+/*
+
+*/
+#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_WOFF (0U)
+#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT (0U)
+#define PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK (0xFFFFFC00U)
+#define PM_DATA_WDDM_MINI_PB_SET_PAGABLE_SIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_CLRMSK ) | (((_x_) & (0x000003ffU)) << (PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT))))
+#define PM_DATA_WDDM_MINI_PB_GET_PAGABLE_SIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_WDDM_MINI_PB_PAGABLE_SIZE_SHIFT)) & 0x000003ffU)
+
+
+/*
+the minimum number of reserve pages when only the local free list is used */
+#define PM_DATA_PM_RESERVE_PAGES_MIN_SIZE (0x00000007U)
+
+
+/*
+the minimum number of reserve pages when a unified free list is present */
+#define PM_DATA_PM_RESERVE_PAGES_MIN_UNIFIED_SIZE (0x0000000bU)
+
+
+/*
+This defines the format of entries in the FSTACK, UFSTACK and MMUSTACK
+*/
+typedef struct PM_DATA_FSTACK_ENTRY_TAG {
+ IMG_UINT32 u32_0;
+} PM_DATA_FSTACK_ENTRY;
+
+/*
+Reserved for future use
+*/
+#define PM_DATA_FSTACK_ENTRY_RSV_WOFF (0U)
+#define PM_DATA_FSTACK_ENTRY_RSV_SHIFT (28U)
+#define PM_DATA_FSTACK_ENTRY_RSV_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_FSTACK_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_FSTACK_ENTRY_RSV_SHIFT))))
+#define PM_DATA_FSTACK_ENTRY_GET_RSV(_ft_) (((_ft_).u32_0 >> (PM_DATA_FSTACK_ENTRY_RSV_SHIFT)) & 0x0000000fU)
+/*
+Address of 4 kB physical page
+*/
+#define PM_DATA_FSTACK_ENTRY_PPAGE_WOFF (0U)
+#define PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT (0U)
+#define PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK (0xF0000000U)
+#define PM_DATA_FSTACK_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_FSTACK_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT))))
+#define PM_DATA_FSTACK_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_FSTACK_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU)
+
+
+/*
+This defines the format of an ALIST (Allocation List) entry
+*/
+typedef struct PM_DATA_ALIST_ENTRY_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+} PM_DATA_ALIST_ENTRY;
+
+/*
+Valid bit. Indicates whether this ALIST entry is valid.
+*/
+#define PM_DATA_ALIST_ENTRY_VAL_WOFF (1U)
+#define PM_DATA_ALIST_ENTRY_VAL_SHIFT (31U)
+#define PM_DATA_ALIST_ENTRY_VAL_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_ALIST_ENTRY_SET_VAL(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VAL_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_ALIST_ENTRY_VAL_SHIFT))))
+#define PM_DATA_ALIST_ENTRY_GET_VAL(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_VAL_SHIFT)) & 0x00000001U)
+/*
+The "data master" of the virtual page. 0=VCE, 1=TE, 2,3=reserved.
+*/
+#define PM_DATA_ALIST_ENTRY_DM_INDEX_WOFF (1U)
+#define PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT (26U)
+#define PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK (0xF3FFFFFFU)
+#define PM_DATA_ALIST_ENTRY_SET_DM_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_DM_INDEX_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT))))
+#define PM_DATA_ALIST_ENTRY_GET_DM_INDEX(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_DM_INDEX_SHIFT)) & 0x00000003U)
+/*
+Render Target Array index. Up to 2 k Render Target Arrays are supported.
+*/
+#define PM_DATA_ALIST_ENTRY_RTA_INDEX_WOFF (1U)
+#define PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT (14U)
+#define PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK (0xFE003FFFU)
+#define PM_DATA_ALIST_ENTRY_SET_RTA_INDEX(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_RTA_INDEX_CLRMSK ) | (((_x_) & (0x000007ffU)) << (PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT))))
+#define PM_DATA_ALIST_ENTRY_GET_RTA_INDEX(_ft_) (((_ft_).u32_1 >> (PM_DATA_ALIST_ENTRY_RTA_INDEX_SHIFT)) & 0x000007ffU)
+/*
+The virtual page number (16 kB virtual page).
+*/
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_WOFF (0U)
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_WOFF (1U)
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_SHIFT (16U)
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_SHIFT (0U)
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK (0x0000FFFFU)
+#define PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK (0xFFFFFFF0U)
+#define PM_DATA_ALIST_ENTRY_SET_VRP_PPAGE(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000ffff))) << 16))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_ALIST_ENTRY_VRP_PPAGE_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000f0000))) >> 16))); }
+#define PM_DATA_ALIST_ENTRY_GET_VRP_PPAGE(_ft_) (((_ft_).u32_0 >> (16)) | ((IMG_UINT64)((_ft_).u32_1 & 0x0000000fU ) << (16)))
+/*
+The 16-bit macrotile mask. Indicates which macrotile(s) are using this 16 kB page
+*/
+#define PM_DATA_ALIST_ENTRY_MTILE_MASK_WOFF (0U)
+#define PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT (0U)
+#define PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK (0xFFFF0000U)
+#define PM_DATA_ALIST_ENTRY_SET_MTILE_MASK(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_ALIST_ENTRY_MTILE_MASK_CLRMSK ) | (((_x_) & (0x0000ffffU)) << (PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT))))
+#define PM_DATA_ALIST_ENTRY_GET_MTILE_MASK(_ft_) (((_ft_).u32_0 >> (PM_DATA_ALIST_ENTRY_MTILE_MASK_SHIFT)) & 0x0000ffffU)
+
+
+/*
+This defines the format of entries in the MLIST
+*/
+typedef struct PM_DATA_MLIST_ENTRY_TAG {
+ IMG_UINT32 u32_0;
+} PM_DATA_MLIST_ENTRY;
+
+/*
+Original source of the MMU page:
+0=Page was allocated from the FSTACK,
+1=Page was allocated from the UFSTACK.
+This bit is ignored when RGX_CR_PM_MMU_STACK_POLICY=1
+*/
+#define PM_DATA_MLIST_ENTRY_SRC_WOFF (0U)
+#define PM_DATA_MLIST_ENTRY_SRC_SHIFT (31U)
+#define PM_DATA_MLIST_ENTRY_SRC_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_MLIST_ENTRY_SET_SRC(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_SRC_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_MLIST_ENTRY_SRC_SHIFT))))
+#define PM_DATA_MLIST_ENTRY_GET_SRC(_ft_) (((_ft_).u32_0 >> (PM_DATA_MLIST_ENTRY_SRC_SHIFT)) & 0x00000001U)
+/*
+Address of Physical Page allocated to MMU
+*/
+#define PM_DATA_MLIST_ENTRY_PPAGE_WOFF (0U)
+#define PM_DATA_MLIST_ENTRY_PPAGE_SHIFT (0U)
+#define PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK (0xF0000000U)
+#define PM_DATA_MLIST_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_MLIST_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_MLIST_ENTRY_PPAGE_SHIFT))))
+#define PM_DATA_MLIST_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_MLIST_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU)
+
+
+/*
+This defines the format of entries in the VFP Table
+*/
+typedef struct PM_DATA_VFP_TABLE_ENTRY_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+} PM_DATA_VFP_TABLE_ENTRY;
+
+/*
+Valid bit. 0=VFP is unmapped, 1=VFP is mapped.
+*/
+#define PM_DATA_VFP_TABLE_ENTRY_VALID_WOFF (1U)
+#define PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT (31U)
+#define PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VFP_TABLE_ENTRY_SET_VALID(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_VALID_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT))))
+#define PM_DATA_VFP_TABLE_ENTRY_GET_VALID(_ft_) (((_ft_).u32_1 >> (PM_DATA_VFP_TABLE_ENTRY_VALID_SHIFT)) & 0x00000001U)
+/*
+Address of MMU Page Table Entry. 8 Byte Granular.
+*/
+#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_WOFF (1U)
+#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT (0U)
+#define PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK (0x80000000U)
+#define PM_DATA_VFP_TABLE_ENTRY_SET_PTE_PTR(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT))))
+#define PM_DATA_VFP_TABLE_ENTRY_GET_PTE_PTR(_ft_) (((_ft_).u32_1 >> (PM_DATA_VFP_TABLE_ENTRY_PTE_PTR_SHIFT)) & 0x7fffffffU)
+/*
+Reserved for future use.
+*/
+#define PM_DATA_VFP_TABLE_ENTRY_RSV_WOFF (0U)
+#define PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT (28U)
+#define PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VFP_TABLE_ENTRY_SET_RSV(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_RSV_CLRMSK ) | (((_x_) & (0x0000000fU)) << (PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT))))
+#define PM_DATA_VFP_TABLE_ENTRY_GET_RSV(_ft_) (((_ft_).u32_0 >> (PM_DATA_VFP_TABLE_ENTRY_RSV_SHIFT)) & 0x0000000fU)
+/*
+Address of 1 kB Physical Page. 1 TB addressable.
+*/
+#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_WOFF (0U)
+#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT (0U)
+#define PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK (0xF0000000U)
+#define PM_DATA_VFP_TABLE_ENTRY_SET_PPAGE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VFP_TABLE_ENTRY_PPAGE_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT))))
+#define PM_DATA_VFP_TABLE_ENTRY_GET_PPAGE(_ft_) (((_ft_).u32_0 >> (PM_DATA_VFP_TABLE_ENTRY_PPAGE_SHIFT)) & 0x0fffffffU)
+
+
+/*
+Per-pipe segment size. It has a fixed mapping as follows:
+PIPE Number - Segment Size
+1 16G
+2 8G
+4 4G
+8 2G
+*/
+typedef struct PM_DATA_PERPIPE_SEGSIZE_TAG {
+ IMG_UINT32 u32_0;
+} PM_DATA_PERPIPE_SEGSIZE;
+
+/*
+PerSegment Size 2G
+*/
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_WOFF (0U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT (3U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK (0xFFFFFFF7U)
+#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE8_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT))))
+#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE8_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE8_SEGSZIZE_SHIFT)) & 0x00000001U)
+/*
+PerSegment Size 4G
+*/
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_WOFF (0U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT (2U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK (0xFFFFFFFBU)
+#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE4_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT))))
+#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE4_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE4_SEGSZIZE_SHIFT)) & 0x00000001U)
+/*
+PerSegment Size 8G
+*/
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_WOFF (0U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT (1U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK (0xFFFFFFFDU)
+#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE2_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT))))
+#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE2_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE2_SEGSZIZE_SHIFT)) & 0x00000001U)
+/*
+PerSegment Size 16G
+*/
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_WOFF (0U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT (0U)
+#define PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK (0xFFFFFFFEU)
+#define PM_DATA_PERPIPE_SEGSIZE_SET_PIPE1_SEGSZIZE(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT))))
+#define PM_DATA_PERPIPE_SEGSIZE_GET_PIPE1_SEGSZIZE(_ft_) (((_ft_).u32_0 >> (PM_DATA_PERPIPE_SEGSIZE_PIPE1_SEGSZIZE_SHIFT)) & 0x00000001U)
+
+
+/*
+PM Virtual Heap Buffer Offset. This buffer contains all the meta-data associated with each render target.
+size is 11904/8 = 1488 Bytes.
+
+Natively the buffer supports up to 8-VCEs and 8-TEs scaling without changing HW.
+
+If the relevant PIPE N is not present, the corresponding space is simply reserved.
+*/
+typedef struct PM_DATA_VHEAP_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+ IMG_UINT32 u32_8;
+ IMG_UINT32 u32_9;
+ IMG_UINT32 u32_10;
+ IMG_UINT32 u32_11;
+ IMG_UINT32 u32_12;
+ IMG_UINT32 u32_13;
+ IMG_UINT32 u32_14;
+ IMG_UINT32 u32_15;
+ IMG_UINT32 u32_16;
+ IMG_UINT32 u32_17;
+ IMG_UINT32 u32_18;
+ IMG_UINT32 u32_19;
+ IMG_UINT32 u32_20;
+ IMG_UINT32 u32_21;
+ IMG_UINT32 u32_22;
+ IMG_UINT32 u32_23;
+ IMG_UINT32 u32_24;
+ IMG_UINT32 u32_25;
+ IMG_UINT32 u32_26;
+ IMG_UINT32 u32_27;
+ IMG_UINT32 u32_28;
+ IMG_UINT32 u32_29;
+ IMG_UINT32 u32_30;
+ IMG_UINT32 u32_31;
+ IMG_UINT32 u32_32;
+ IMG_UINT32 u32_33;
+ IMG_UINT32 u32_34;
+ IMG_UINT32 u32_35;
+ IMG_UINT32 u32_36;
+ IMG_UINT32 u32_37;
+ IMG_UINT32 u32_38;
+ IMG_UINT32 u32_39;
+ IMG_UINT32 u32_40;
+ IMG_UINT32 u32_41;
+ IMG_UINT32 u32_42;
+ IMG_UINT32 u32_43;
+ IMG_UINT32 u32_44;
+ IMG_UINT32 u32_45;
+ IMG_UINT32 u32_46;
+ IMG_UINT32 u32_47;
+ IMG_UINT32 u32_48;
+ IMG_UINT32 u32_49;
+ IMG_UINT32 u32_50;
+ IMG_UINT32 u32_51;
+ IMG_UINT32 u32_52;
+ IMG_UINT32 u32_53;
+ IMG_UINT32 u32_54;
+ IMG_UINT32 u32_55;
+ IMG_UINT32 u32_56;
+ IMG_UINT32 u32_57;
+ IMG_UINT32 u32_58;
+ IMG_UINT32 u32_59;
+ IMG_UINT32 u32_60;
+ IMG_UINT32 u32_61;
+ IMG_UINT32 u32_62;
+ IMG_UINT32 u32_63;
+ IMG_UINT32 u32_64;
+ IMG_UINT32 u32_65;
+ IMG_UINT32 u32_66;
+ IMG_UINT32 u32_67;
+ IMG_UINT32 u32_68;
+ IMG_UINT32 u32_69;
+ IMG_UINT32 u32_70;
+ IMG_UINT32 u32_71;
+ IMG_UINT32 u32_72;
+ IMG_UINT32 u32_73;
+ IMG_UINT32 u32_74;
+ IMG_UINT32 u32_75;
+ IMG_UINT32 u32_76;
+ IMG_UINT32 u32_77;
+ IMG_UINT32 u32_78;
+ IMG_UINT32 u32_79;
+ IMG_UINT32 u32_80;
+ IMG_UINT32 u32_81;
+ IMG_UINT32 u32_82;
+ IMG_UINT32 u32_83;
+ IMG_UINT32 u32_84;
+ IMG_UINT32 u32_85;
+ IMG_UINT32 u32_86;
+ IMG_UINT32 u32_87;
+ IMG_UINT32 u32_88;
+ IMG_UINT32 u32_89;
+ IMG_UINT32 u32_90;
+ IMG_UINT32 u32_91;
+ IMG_UINT32 u32_92;
+ IMG_UINT32 u32_93;
+ IMG_UINT32 u32_94;
+ IMG_UINT32 u32_95;
+ IMG_UINT32 u32_96;
+ IMG_UINT32 u32_97;
+ IMG_UINT32 u32_98;
+ IMG_UINT32 u32_99;
+ IMG_UINT32 u32_100;
+ IMG_UINT32 u32_101;
+ IMG_UINT32 u32_102;
+ IMG_UINT32 u32_103;
+ IMG_UINT32 u32_104;
+ IMG_UINT32 u32_105;
+ IMG_UINT32 u32_106;
+ IMG_UINT32 u32_107;
+ IMG_UINT32 u32_108;
+ IMG_UINT32 u32_109;
+ IMG_UINT32 u32_110;
+ IMG_UINT32 u32_111;
+ IMG_UINT32 u32_112;
+ IMG_UINT32 u32_113;
+ IMG_UINT32 u32_114;
+ IMG_UINT32 u32_115;
+ IMG_UINT32 u32_116;
+ IMG_UINT32 u32_117;
+ IMG_UINT32 u32_118;
+ IMG_UINT32 u32_119;
+ IMG_UINT32 u32_120;
+ IMG_UINT32 u32_121;
+ IMG_UINT32 u32_122;
+ IMG_UINT32 u32_123;
+ IMG_UINT32 u32_124;
+ IMG_UINT32 u32_125;
+ IMG_UINT32 u32_126;
+ IMG_UINT32 u32_127;
+ IMG_UINT32 u32_128;
+ IMG_UINT32 u32_129;
+ IMG_UINT32 u32_130;
+ IMG_UINT32 u32_131;
+ IMG_UINT32 u32_132;
+ IMG_UINT32 u32_133;
+ IMG_UINT32 u32_134;
+ IMG_UINT32 u32_135;
+ IMG_UINT32 u32_136;
+ IMG_UINT32 u32_137;
+ IMG_UINT32 u32_138;
+ IMG_UINT32 u32_139;
+ IMG_UINT32 u32_140;
+ IMG_UINT32 u32_141;
+ IMG_UINT32 u32_142;
+ IMG_UINT32 u32_143;
+ IMG_UINT32 u32_144;
+ IMG_UINT32 u32_145;
+ IMG_UINT32 u32_146;
+ IMG_UINT32 u32_147;
+ IMG_UINT32 u32_148;
+ IMG_UINT32 u32_149;
+ IMG_UINT32 u32_150;
+ IMG_UINT32 u32_151;
+ IMG_UINT32 u32_152;
+ IMG_UINT32 u32_153;
+ IMG_UINT32 u32_154;
+ IMG_UINT32 u32_155;
+ IMG_UINT32 u32_156;
+ IMG_UINT32 u32_157;
+ IMG_UINT32 u32_158;
+ IMG_UINT32 u32_159;
+ IMG_UINT32 u32_160;
+ IMG_UINT32 u32_161;
+ IMG_UINT32 u32_162;
+ IMG_UINT32 u32_163;
+ IMG_UINT32 u32_164;
+ IMG_UINT32 u32_165;
+ IMG_UINT32 u32_166;
+ IMG_UINT32 u32_167;
+ IMG_UINT32 u32_168;
+ IMG_UINT32 u32_169;
+ IMG_UINT32 u32_170;
+ IMG_UINT32 u32_171;
+ IMG_UINT32 u32_172;
+ IMG_UINT32 u32_173;
+ IMG_UINT32 u32_174;
+ IMG_UINT32 u32_175;
+ IMG_UINT32 u32_176;
+ IMG_UINT32 u32_177;
+ IMG_UINT32 u32_178;
+ IMG_UINT32 u32_179;
+ IMG_UINT32 u32_180;
+ IMG_UINT32 u32_181;
+ IMG_UINT32 u32_182;
+ IMG_UINT32 u32_183;
+ IMG_UINT32 u32_184;
+ IMG_UINT32 u32_185;
+ IMG_UINT32 u32_186;
+ IMG_UINT32 u32_187;
+ IMG_UINT32 u32_188;
+ IMG_UINT32 u32_189;
+ IMG_UINT32 u32_190;
+ IMG_UINT32 u32_191;
+ IMG_UINT32 u32_192;
+ IMG_UINT32 u32_193;
+ IMG_UINT32 u32_194;
+ IMG_UINT32 u32_195;
+ IMG_UINT32 u32_196;
+ IMG_UINT32 u32_197;
+ IMG_UINT32 u32_198;
+ IMG_UINT32 u32_199;
+ IMG_UINT32 u32_200;
+ IMG_UINT32 u32_201;
+ IMG_UINT32 u32_202;
+ IMG_UINT32 u32_203;
+ IMG_UINT32 u32_204;
+ IMG_UINT32 u32_205;
+ IMG_UINT32 u32_206;
+ IMG_UINT32 u32_207;
+ IMG_UINT32 u32_208;
+ IMG_UINT32 u32_209;
+ IMG_UINT32 u32_210;
+ IMG_UINT32 u32_211;
+ IMG_UINT32 u32_212;
+ IMG_UINT32 u32_213;
+ IMG_UINT32 u32_214;
+ IMG_UINT32 u32_215;
+ IMG_UINT32 u32_216;
+ IMG_UINT32 u32_217;
+ IMG_UINT32 u32_218;
+ IMG_UINT32 u32_219;
+ IMG_UINT32 u32_220;
+ IMG_UINT32 u32_221;
+ IMG_UINT32 u32_222;
+ IMG_UINT32 u32_223;
+ IMG_UINT32 u32_224;
+ IMG_UINT32 u32_225;
+ IMG_UINT32 u32_226;
+ IMG_UINT32 u32_227;
+ IMG_UINT32 u32_228;
+ IMG_UINT32 u32_229;
+ IMG_UINT32 u32_230;
+ IMG_UINT32 u32_231;
+ IMG_UINT32 u32_232;
+ IMG_UINT32 u32_233;
+ IMG_UINT32 u32_234;
+ IMG_UINT32 u32_235;
+ IMG_UINT32 u32_236;
+ IMG_UINT32 u32_237;
+ IMG_UINT32 u32_238;
+ IMG_UINT32 u32_239;
+ IMG_UINT32 u32_240;
+ IMG_UINT32 u32_241;
+ IMG_UINT32 u32_242;
+ IMG_UINT32 u32_243;
+ IMG_UINT32 u32_244;
+ IMG_UINT32 u32_245;
+ IMG_UINT32 u32_246;
+ IMG_UINT32 u32_247;
+ IMG_UINT32 u32_248;
+ IMG_UINT32 u32_249;
+ IMG_UINT32 u32_250;
+ IMG_UINT32 u32_251;
+ IMG_UINT32 u32_252;
+ IMG_UINT32 u32_253;
+ IMG_UINT32 u32_254;
+ IMG_UINT32 u32_255;
+ IMG_UINT32 u32_256;
+ IMG_UINT32 u32_257;
+ IMG_UINT32 u32_258;
+ IMG_UINT32 u32_259;
+ IMG_UINT32 u32_260;
+ IMG_UINT32 u32_261;
+ IMG_UINT32 u32_262;
+ IMG_UINT32 u32_263;
+ IMG_UINT32 u32_264;
+ IMG_UINT32 u32_265;
+ IMG_UINT32 u32_266;
+ IMG_UINT32 u32_267;
+ IMG_UINT32 u32_268;
+ IMG_UINT32 u32_269;
+ IMG_UINT32 u32_270;
+ IMG_UINT32 u32_271;
+ IMG_UINT32 u32_272;
+ IMG_UINT32 u32_273;
+ IMG_UINT32 u32_274;
+ IMG_UINT32 u32_275;
+ IMG_UINT32 u32_276;
+ IMG_UINT32 u32_277;
+ IMG_UINT32 u32_278;
+ IMG_UINT32 u32_279;
+ IMG_UINT32 u32_280;
+ IMG_UINT32 u32_281;
+ IMG_UINT32 u32_282;
+ IMG_UINT32 u32_283;
+ IMG_UINT32 u32_284;
+ IMG_UINT32 u32_285;
+ IMG_UINT32 u32_286;
+ IMG_UINT32 u32_287;
+ IMG_UINT32 u32_288;
+ IMG_UINT32 u32_289;
+ IMG_UINT32 u32_290;
+ IMG_UINT32 u32_291;
+ IMG_UINT32 u32_292;
+ IMG_UINT32 u32_293;
+ IMG_UINT32 u32_294;
+ IMG_UINT32 u32_295;
+ IMG_UINT32 u32_296;
+ IMG_UINT32 u32_297;
+ IMG_UINT32 u32_298;
+ IMG_UINT32 u32_299;
+ IMG_UINT32 u32_300;
+ IMG_UINT32 u32_301;
+ IMG_UINT32 u32_302;
+ IMG_UINT32 u32_303;
+ IMG_UINT32 u32_304;
+ IMG_UINT32 u32_305;
+ IMG_UINT32 u32_306;
+ IMG_UINT32 u32_307;
+ IMG_UINT32 u32_308;
+ IMG_UINT32 u32_309;
+ IMG_UINT32 u32_310;
+ IMG_UINT32 u32_311;
+ IMG_UINT32 u32_312;
+ IMG_UINT32 u32_313;
+ IMG_UINT32 u32_314;
+ IMG_UINT32 u32_315;
+ IMG_UINT32 u32_316;
+ IMG_UINT32 u32_317;
+ IMG_UINT32 u32_318;
+ IMG_UINT32 u32_319;
+ IMG_UINT32 u32_320;
+ IMG_UINT32 u32_321;
+ IMG_UINT32 u32_322;
+ IMG_UINT32 u32_323;
+ IMG_UINT32 u32_324;
+ IMG_UINT32 u32_325;
+ IMG_UINT32 u32_326;
+ IMG_UINT32 u32_327;
+ IMG_UINT32 u32_328;
+ IMG_UINT32 u32_329;
+ IMG_UINT32 u32_330;
+ IMG_UINT32 u32_331;
+ IMG_UINT32 u32_332;
+ IMG_UINT32 u32_333;
+ IMG_UINT32 u32_334;
+ IMG_UINT32 u32_335;
+ IMG_UINT32 u32_336;
+ IMG_UINT32 u32_337;
+ IMG_UINT32 u32_338;
+ IMG_UINT32 u32_339;
+ IMG_UINT32 u32_340;
+ IMG_UINT32 u32_341;
+ IMG_UINT32 u32_342;
+ IMG_UINT32 u32_343;
+ IMG_UINT32 u32_344;
+ IMG_UINT32 u32_345;
+ IMG_UINT32 u32_346;
+ IMG_UINT32 u32_347;
+ IMG_UINT32 u32_348;
+ IMG_UINT32 u32_349;
+ IMG_UINT32 u32_350;
+ IMG_UINT32 u32_351;
+ IMG_UINT32 u32_352;
+ IMG_UINT32 u32_353;
+ IMG_UINT32 u32_354;
+ IMG_UINT32 u32_355;
+ IMG_UINT32 u32_356;
+ IMG_UINT32 u32_357;
+ IMG_UINT32 u32_358;
+ IMG_UINT32 u32_359;
+ IMG_UINT32 u32_360;
+ IMG_UINT32 u32_361;
+ IMG_UINT32 u32_362;
+ IMG_UINT32 u32_363;
+ IMG_UINT32 u32_364;
+ IMG_UINT32 u32_365;
+ IMG_UINT32 u32_366;
+ IMG_UINT32 u32_367;
+ IMG_UINT32 u32_368;
+ IMG_UINT32 u32_369;
+ IMG_UINT32 u32_370;
+ IMG_UINT32 u32_371;
+} PM_DATA_VHEAP_BUFFER;
+
+/*
+TE7 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_WOFF (371U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_371 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE7 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_WOFF (370U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_WOFF (371U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_371 = (((_ft_).u32_371 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_370 >> (20)) | ((IMG_UINT64)((_ft_).u32_371 & 0x0000ffffU ) << (12)))
+/*
+TE7 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_WOFF (369U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_WOFF (370U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_370 = (((_ft_).u32_370 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_369 >> (24)) | ((IMG_UINT64)((_ft_).u32_370 & 0x000fffffU ) << (8)))
+/*
+TE7 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 368[31:28], bits [27:4] in word 369[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_WOFF (368U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_WOFF (369U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_369 = (((_ft_).u32_369 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_368 >> (28)) | ((IMG_UINT64)((_ft_).u32_369 & 0x00ffffffU ) << (4)))
+/*
+TE7 opened page0 struct: physical page 0 (28-bit field in word 368[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_WOFF (368U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_368 = (((_ft_).u32_368 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_368 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE7 opened page0 struct: virtual page number (20-bit field in word 364[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_WOFF (364U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_364 = (((_ft_).u32_364 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_364 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE6 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 363)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_WOFF (363U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_363 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE6 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 362[31:20], bits [27:12] in word 363[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_WOFF (362U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_WOFF (363U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_363 = (((_ft_).u32_363 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_362 >> (20)) | ((IMG_UINT64)((_ft_).u32_363 & 0x0000ffffU ) << (12)))
+/*
+TE6 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 361[31:24], bits [27:8] in word 362[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_WOFF (361U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_WOFF (362U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_362 = (((_ft_).u32_362 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_361 >> (24)) | ((IMG_UINT64)((_ft_).u32_362 & 0x000fffffU ) << (8)))
+/*
+TE6 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 360[31:28], bits [27:4] in word 361[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_WOFF (360U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_WOFF (361U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_361 = (((_ft_).u32_361 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_360 >> (28)) | ((IMG_UINT64)((_ft_).u32_361 & 0x00ffffffU ) << (4)))
+/*
+TE6 opened page0 struct: physical page 0 (28-bit field in word 360[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_WOFF (360U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_360 = (((_ft_).u32_360 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_360 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE6 opened page0 struct: virtual page number (20-bit field in word 356[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_WOFF (356U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_356 = (((_ft_).u32_356 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_356 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE5 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 355)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_WOFF (355U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_355 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE5 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 354[31:20], bits [27:12] in word 355[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_WOFF (354U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_WOFF (355U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_355 = (((_ft_).u32_355 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_354 >> (20)) | ((IMG_UINT64)((_ft_).u32_355 & 0x0000ffffU ) << (12)))
+/*
+TE5 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 353[31:24], bits [27:8] in word 354[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_WOFF (353U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_WOFF (354U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_354 = (((_ft_).u32_354 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_353 >> (24)) | ((IMG_UINT64)((_ft_).u32_354 & 0x000fffffU ) << (8)))
+/*
+TE5 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 352[31:28], bits [27:4] in word 353[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_WOFF (352U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_WOFF (353U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_353 = (((_ft_).u32_353 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_352 >> (28)) | ((IMG_UINT64)((_ft_).u32_353 & 0x00ffffffU ) << (4)))
+/*
+TE5 opened page0 struct: physical page 0 (28-bit field in word 352[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_WOFF (352U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_352 = (((_ft_).u32_352 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_352 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE5 opened page0 struct: virtual page number (20-bit field in word 348[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_WOFF (348U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_348 = (((_ft_).u32_348 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_348 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE4 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 347)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_WOFF (347U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_347 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE4 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 346[31:20], bits [27:12] in word 347[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_WOFF (346U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_WOFF (347U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_347 = (((_ft_).u32_347 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_346 >> (20)) | ((IMG_UINT64)((_ft_).u32_347 & 0x0000ffffU ) << (12)))
+/*
+TE4 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 345[31:24], bits [27:8] in word 346[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_WOFF (345U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_WOFF (346U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_346 = (((_ft_).u32_346 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_345 >> (24)) | ((IMG_UINT64)((_ft_).u32_346 & 0x000fffffU ) << (8)))
+/*
+TE4 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 344[31:28], bits [27:4] in word 345[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_WOFF (344U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_WOFF (345U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_344 = (((_ft_).u32_344 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_345 = (((_ft_).u32_345 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_344 >> (28)) | ((IMG_UINT64)((_ft_).u32_345 & 0x00ffffffU ) << (4)))
+/*
+TE4 opened page0 struct: physical page 0 (28-bit field in word 344[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_WOFF (344U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_344 = (((_ft_).u32_344 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_344 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE4 opened page0 struct: virtual page number (20-bit field in word 340[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_WOFF (340U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_340 = (((_ft_).u32_340 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_340 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE3 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 339)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_WOFF (339U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_339 = (((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_339 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE3 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 338[31:20], bits [27:12] in word 339[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_WOFF (338U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_WOFF (339U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_338 = (((_ft_).u32_338 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_339 = (((_ft_).u32_339 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_338 >> (20)) | ((IMG_UINT64)((_ft_).u32_339 & 0x0000ffffU ) << (12)))
+/*
+TE3 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 337[31:24], bits [27:8] in word 338[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_WOFF (337U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_WOFF (338U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_338 = (((_ft_).u32_338 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_337 >> (24)) | ((IMG_UINT64)((_ft_).u32_338 & 0x000fffffU ) << (8)))
+/*
+TE3 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 336[31:28], bits [27:4] in word 337[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_WOFF (336U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_WOFF (337U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_337 = (((_ft_).u32_337 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_336 >> (28)) | ((IMG_UINT64)((_ft_).u32_337 & 0x00ffffffU ) << (4)))
+/*
+TE3 opened page0 struct: physical page 0 (28-bit field in word 336[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_WOFF (336U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_336 = (((_ft_).u32_336 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_336 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE3 opened page0 struct: virtual page number (20-bit field in word 332[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_WOFF (332U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_332 = (((_ft_).u32_332 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_332 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE2 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 331)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_WOFF (331U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_331 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE2 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 330[31:20], bits [27:12] in word 331[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_WOFF (330U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_WOFF (331U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_331 = (((_ft_).u32_331 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_330 >> (20)) | ((IMG_UINT64)((_ft_).u32_331 & 0x0000ffffU ) << (12)))
+/*
+TE2 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 329[31:24], bits [27:8] in word 330[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_WOFF (329U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_WOFF (330U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_330 = (((_ft_).u32_330 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_329 >> (24)) | ((IMG_UINT64)((_ft_).u32_330 & 0x000fffffU ) << (8)))
+/*
+TE2 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 328[31:28], bits [27:4] in word 329[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_WOFF (328U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_WOFF (329U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_329 = (((_ft_).u32_329 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_328 >> (28)) | ((IMG_UINT64)((_ft_).u32_329 & 0x00ffffffU ) << (4)))
+/*
+TE2 opened page0 struct: physical page 0 (28-bit field in word 328[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_WOFF (328U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_328 = (((_ft_).u32_328 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_328 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE2 opened page0 struct: virtual page number (20-bit field in word 324[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_WOFF (324U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_324 = (((_ft_).u32_324 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_324 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE1 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 323)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_WOFF (323U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_323 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE1 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 322[31:20], bits [27:12] in word 323[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_WOFF (322U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_WOFF (323U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_323 = (((_ft_).u32_323 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_322 >> (20)) | ((IMG_UINT64)((_ft_).u32_323 & 0x0000ffffU ) << (12)))
+/*
+TE1 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 321[31:24], bits [27:8] in word 322[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_WOFF (321U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_WOFF (322U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_322 = (((_ft_).u32_322 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_321 >> (24)) | ((IMG_UINT64)((_ft_).u32_322 & 0x000fffffU ) << (8)))
+/*
+TE1 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 320[31:28], bits [27:4] in word 321[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_WOFF (320U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_WOFF (321U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_321 = (((_ft_).u32_321 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_320 >> (28)) | ((IMG_UINT64)((_ft_).u32_321 & 0x00ffffffU ) << (4)))
+/*
+TE1 opened page0 struct: physical page 0 (28-bit field in word 320[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_WOFF (320U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_320 = (((_ft_).u32_320 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_320 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE1 opened page0 struct: virtual page number (20-bit field in word 316[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_WOFF (316U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_316 = (((_ft_).u32_316 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_316 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+TE0 opened page0 struct: unified stack bit (1-bit flag at bit 16 of word 315)
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_WOFF (315U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_315 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+TE0 opened page0 struct: physical page 3 (28-bit field: bits [11:0] in word 314[31:20], bits [27:12] in word 315[15:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_WOFF (314U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_WOFF (315U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_315 = (((_ft_).u32_315 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_314 >> (20)) | ((IMG_UINT64)((_ft_).u32_315 & 0x0000ffffU ) << (12)))
+/*
+TE0 opened page0 struct: physical page 2 (28-bit field: bits [7:0] in word 313[31:24], bits [27:8] in word 314[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_WOFF (313U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_WOFF (314U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_314 = (((_ft_).u32_314 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_313 >> (24)) | ((IMG_UINT64)((_ft_).u32_314 & 0x000fffffU ) << (8)))
+/*
+TE0 opened page0 struct: physical page 1 (28-bit field: bits [3:0] in word 312[31:28], bits [27:4] in word 313[23:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_WOFF (312U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_WOFF (313U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_313 = (((_ft_).u32_313 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_312 >> (28)) | ((IMG_UINT64)((_ft_).u32_313 & 0x00ffffffU ) << (4)))
+/*
+TE0 opened page0 struct: physical page 0 (28-bit field in word 312[27:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_WOFF (312U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_312 = (((_ft_).u32_312 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_312 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+TE0 opened page0 struct: virtual page number (20-bit field in word 308[19:0])
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_WOFF (308U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_308 = (((_ft_).u32_308 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_308 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE7 in-flight page-3 entry (auto-generated; values not hand-edited): UFSTACK flag,
+ * physical pages 3..0 (28-bit each; PPAGE1..3 split W0/W1 across two 32-bit words) and a
+ * 20-bit virtual page number, in words u32_300..u32_307. */
+/*
+VCE7 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_WOFF (307U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_307 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE7 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_WOFF (306U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_WOFF (307U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_307 = (((_ft_).u32_307 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_306 >> (20)) | ((IMG_UINT64)((_ft_).u32_307 & 0x0000ffffU ) << (12)))
+/*
+VCE7 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_WOFF (305U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_WOFF (306U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_306 = (((_ft_).u32_306 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_305 >> (24)) | ((IMG_UINT64)((_ft_).u32_306 & 0x000fffffU ) << (8)))
+/*
+VCE7 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_WOFF (304U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_WOFF (305U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_305 = (((_ft_).u32_305 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_304 >> (28)) | ((IMG_UINT64)((_ft_).u32_305 & 0x00ffffffU ) << (4)))
+/*
+VCE7 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_WOFF (304U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_304 = (((_ft_).u32_304 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_304 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE7 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_WOFF (300U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_300 = (((_ft_).u32_300 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_300 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE7 in-flight page-2 entry (auto-generated; values not hand-edited): same layout as the
+ * other in-flight entries — UFSTACK flag, 28-bit physical pages 3..0 (PPAGE1..3 split W0/W1)
+ * and a 20-bit virtual page number, in words u32_292..u32_299. */
+/*
+VCE7 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_WOFF (299U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_299 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE7 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_WOFF (298U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_WOFF (299U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_299 = (((_ft_).u32_299 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_298 >> (20)) | ((IMG_UINT64)((_ft_).u32_299 & 0x0000ffffU ) << (12)))
+/*
+VCE7 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_WOFF (297U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_WOFF (298U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_298 = (((_ft_).u32_298 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_297 >> (24)) | ((IMG_UINT64)((_ft_).u32_298 & 0x000fffffU ) << (8)))
+/*
+VCE7 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_WOFF (296U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_WOFF (297U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_297 = (((_ft_).u32_297 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_296 >> (28)) | ((IMG_UINT64)((_ft_).u32_297 & 0x00ffffffU ) << (4)))
+/*
+VCE7 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_WOFF (296U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_296 = (((_ft_).u32_296 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_296 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE7 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_WOFF (292U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_292 = (((_ft_).u32_292 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_292 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE7 in-flight page-1 entry (auto-generated; values not hand-edited): UFSTACK flag,
+ * 28-bit physical pages 3..0 (PPAGE1..3 split W0/W1 across adjacent words) and a 20-bit
+ * virtual page number, in words u32_284..u32_291. */
+/*
+VCE7 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_WOFF (291U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_291 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE7 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_WOFF (290U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_WOFF (291U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_291 = (((_ft_).u32_291 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_290 >> (20)) | ((IMG_UINT64)((_ft_).u32_291 & 0x0000ffffU ) << (12)))
+/*
+VCE7 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_WOFF (289U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_WOFF (290U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_290 = (((_ft_).u32_290 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_289 >> (24)) | ((IMG_UINT64)((_ft_).u32_290 & 0x000fffffU ) << (8)))
+/*
+VCE7 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_WOFF (288U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_WOFF (289U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_289 = (((_ft_).u32_289 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_288 >> (28)) | ((IMG_UINT64)((_ft_).u32_289 & 0x00ffffffU ) << (4)))
+/*
+VCE7 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_WOFF (288U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_288 = (((_ft_).u32_288 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_288 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE7 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_WOFF (284U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_284 = (((_ft_).u32_284 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_284 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE7 in-flight page-0 entry (auto-generated; values not hand-edited): UFSTACK flag,
+ * 28-bit physical pages 3..0 (PPAGE1..3 split W0/W1) and a 20-bit virtual page number,
+ * in words u32_276..u32_283. */
+/*
+VCE7 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_WOFF (283U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_283 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE7 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_WOFF (282U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_WOFF (283U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_283 = (((_ft_).u32_283 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_282 >> (20)) | ((IMG_UINT64)((_ft_).u32_283 & 0x0000ffffU ) << (12)))
+/*
+VCE7 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_WOFF (281U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_WOFF (282U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_282 = (((_ft_).u32_282 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_281 >> (24)) | ((IMG_UINT64)((_ft_).u32_282 & 0x000fffffU ) << (8)))
+/*
+VCE7 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_WOFF (280U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_WOFF (281U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_281 = (((_ft_).u32_281 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_280 >> (28)) | ((IMG_UINT64)((_ft_).u32_281 & 0x00ffffffU ) << (4)))
+/*
+VCE7 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_WOFF (280U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_280 = (((_ft_).u32_280 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_280 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE7 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_WOFF (276U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_276 = (((_ft_).u32_276 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_276 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE6 in-flight page-1 entry (auto-generated; values not hand-edited): UFSTACK flag,
+ * 28-bit physical pages 3..0 (PPAGE1..3 split W0/W1) and a 20-bit virtual page number,
+ * in words u32_252..u32_259. NOTE(review): word offsets jump from 276 (VCE7 INFLIGHT0)
+ * to 259 here with no VCE6 INFLIGHT3/INFLIGHT2 accessors in between in this region —
+ * presumably defined elsewhere or intentionally absent; confirm against the generator. */
+/*
+VCE6 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_WOFF (259U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_259 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE6 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_WOFF (258U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_WOFF (259U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_259 = (((_ft_).u32_259 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_258 >> (20)) | ((IMG_UINT64)((_ft_).u32_259 & 0x0000ffffU ) << (12)))
+/*
+VCE6 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_WOFF (257U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_WOFF (258U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_258 = (((_ft_).u32_258 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_257 >> (24)) | ((IMG_UINT64)((_ft_).u32_258 & 0x000fffffU ) << (8)))
+/*
+VCE6 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_WOFF (256U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_WOFF (257U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_257 = (((_ft_).u32_257 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_256 >> (28)) | ((IMG_UINT64)((_ft_).u32_257 & 0x00ffffffU ) << (4)))
+/*
+VCE6 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_WOFF (256U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_256 = (((_ft_).u32_256 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_256 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE6 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_WOFF (252U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_252 = (((_ft_).u32_252 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_252 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE6 in-flight page-0 entry (auto-generated; values not hand-edited): UFSTACK flag,
+ * 28-bit physical pages 3..0 (PPAGE1..3 split W0/W1) and a 20-bit virtual page number,
+ * in words u32_244..u32_251. */
+/*
+VCE6 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_WOFF (251U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_251 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE6 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_WOFF (250U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_WOFF (251U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_251 = (((_ft_).u32_251 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_250 >> (20)) | ((IMG_UINT64)((_ft_).u32_251 & 0x0000ffffU ) << (12)))
+/*
+VCE6 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_WOFF (249U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_WOFF (250U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_250 = (((_ft_).u32_250 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_249 >> (24)) | ((IMG_UINT64)((_ft_).u32_250 & 0x000fffffU ) << (8)))
+/*
+VCE6 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_WOFF (248U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_WOFF (249U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_249 = (((_ft_).u32_249 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_248 >> (28)) | ((IMG_UINT64)((_ft_).u32_249 & 0x00ffffffU ) << (4)))
+/*
+VCE6 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_WOFF (248U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_248 = (((_ft_).u32_248 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_248 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE6 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_WOFF (244U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_244 = (((_ft_).u32_244 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_244 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/* VCE5 in-flight page-3 entry (auto-generated; values not hand-edited): UFSTACK flag and the
+ * split-word physical pages 3 and 2 shown here, in words u32_241..u32_243; the remaining
+ * PPAGE1/PPAGE0/PAGE accessors for this entry continue below. */
+/*
+VCE5 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_WOFF (243U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_243 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE5 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_WOFF (242U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_WOFF (243U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_243 = (((_ft_).u32_243 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_242 >> (20)) | ((IMG_UINT64)((_ft_).u32_243 & 0x0000ffffU ) << (12)))
+/*
+VCE5 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_WOFF (241U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_WOFF (242U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_242 = (((_ft_).u32_242 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_241 >> (24)) | ((IMG_UINT64)((_ft_).u32_242 & 0x000fffffU ) << (8)))
+/*
+VCE5 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_WOFF (240U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_WOFF (241U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_241 = (((_ft_).u32_241 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_240 >> (28)) | ((IMG_UINT64)((_ft_).u32_241 & 0x00ffffffU ) << (4)))
+/*
+VCE5 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_WOFF (240U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_240 = (((_ft_).u32_240 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_240 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE5 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_WOFF (236U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_236 = (((_ft_).u32_236 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_236 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE5 in-flight entry 2 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_232..u32_235 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_228 — regenerate rather than hand-edit. */
+/*
+VCE5 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_WOFF (235U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_235 = (((_ft_).u32_235 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_235 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE5 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_WOFF (234U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_WOFF (235U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_234 = (((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_235 = (((_ft_).u32_235 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_234 >> (20)) | ((IMG_UINT64)((_ft_).u32_235 & 0x0000ffffU ) << (12)))
+/*
+VCE5 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_WOFF (233U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_WOFF (234U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_234 = (((_ft_).u32_234 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_233 >> (24)) | ((IMG_UINT64)((_ft_).u32_234 & 0x000fffffU ) << (8)))
+/*
+VCE5 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_WOFF (232U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_WOFF (233U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_233 = (((_ft_).u32_233 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_232 >> (28)) | ((IMG_UINT64)((_ft_).u32_233 & 0x00ffffffU ) << (4)))
+/*
+VCE5 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_WOFF (232U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_232 = (((_ft_).u32_232 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_232 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE5 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_WOFF (228U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_228 = (((_ft_).u32_228 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_228 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE5 in-flight entry 1 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_224..u32_227 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_220 — regenerate rather than hand-edit. */
+/*
+VCE5 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_WOFF (227U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_227 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE5 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_WOFF (226U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_WOFF (227U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_227 = (((_ft_).u32_227 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_226 >> (20)) | ((IMG_UINT64)((_ft_).u32_227 & 0x0000ffffU ) << (12)))
+/*
+VCE5 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_WOFF (225U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_WOFF (226U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_226 = (((_ft_).u32_226 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_225 >> (24)) | ((IMG_UINT64)((_ft_).u32_226 & 0x000fffffU ) << (8)))
+/*
+VCE5 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_WOFF (224U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_WOFF (225U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_225 = (((_ft_).u32_225 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_224 >> (28)) | ((IMG_UINT64)((_ft_).u32_225 & 0x00ffffffU ) << (4)))
+/*
+VCE5 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_WOFF (224U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_224 = (((_ft_).u32_224 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_224 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE5 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_WOFF (220U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_220 = (((_ft_).u32_220 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_220 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE5 in-flight entry 0 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_216..u32_219 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_212 — regenerate rather than hand-edit. */
+/*
+VCE5 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_WOFF (219U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_219 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE5 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_WOFF (218U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_WOFF (219U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_219 = (((_ft_).u32_219 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_218 >> (20)) | ((IMG_UINT64)((_ft_).u32_219 & 0x0000ffffU ) << (12)))
+/*
+VCE5 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_WOFF (217U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_WOFF (218U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_218 = (((_ft_).u32_218 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_217 >> (24)) | ((IMG_UINT64)((_ft_).u32_218 & 0x000fffffU ) << (8)))
+/*
+VCE5 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_WOFF (216U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_WOFF (217U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_217 = (((_ft_).u32_217 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_216 >> (28)) | ((IMG_UINT64)((_ft_).u32_217 & 0x00ffffffU ) << (4)))
+/*
+VCE5 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_WOFF (216U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_216 = (((_ft_).u32_216 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_216 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE5 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_WOFF (212U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_212 = (((_ft_).u32_212 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_212 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE4 in-flight entry 3 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_208..u32_211 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_204 — regenerate rather than hand-edit. */
+/*
+VCE4 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_WOFF (211U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_211 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE4 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_WOFF (210U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_WOFF (211U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_211 = (((_ft_).u32_211 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_210 >> (20)) | ((IMG_UINT64)((_ft_).u32_211 & 0x0000ffffU ) << (12)))
+/*
+VCE4 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_WOFF (209U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_WOFF (210U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_210 = (((_ft_).u32_210 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_209 >> (24)) | ((IMG_UINT64)((_ft_).u32_210 & 0x000fffffU ) << (8)))
+/*
+VCE4 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_WOFF (208U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_WOFF (209U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_209 = (((_ft_).u32_209 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_208 >> (28)) | ((IMG_UINT64)((_ft_).u32_209 & 0x00ffffffU ) << (4)))
+/*
+VCE4 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_WOFF (208U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_208 = (((_ft_).u32_208 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_208 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE4 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_WOFF (204U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_204 = (((_ft_).u32_204 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_204 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE4 in-flight entry 2 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_200..u32_203 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_196 — regenerate rather than hand-edit. */
+/*
+VCE4 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_WOFF (203U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_203 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE4 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_WOFF (202U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_WOFF (203U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_203 = (((_ft_).u32_203 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_202 >> (20)) | ((IMG_UINT64)((_ft_).u32_203 & 0x0000ffffU ) << (12)))
+/*
+VCE4 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_WOFF (201U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_WOFF (202U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_202 = (((_ft_).u32_202 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_201 >> (24)) | ((IMG_UINT64)((_ft_).u32_202 & 0x000fffffU ) << (8)))
+/*
+VCE4 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_WOFF (200U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_WOFF (201U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_201 = (((_ft_).u32_201 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_200 >> (28)) | ((IMG_UINT64)((_ft_).u32_201 & 0x00ffffffU ) << (4)))
+/*
+VCE4 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_WOFF (200U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_200 = (((_ft_).u32_200 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_200 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE4 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_WOFF (196U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_196 = (((_ft_).u32_196 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_196 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/* NOTE(review): machine-generated bitfield accessors for the VCE4 in-flight entry 1 record: 1-bit UFSTACK flag, four 28-bit physical page fields packed into u32_192..u32_195 (PPAGE1..PPAGE3 straddle 32-bit word boundaries, hence the split W0/W1 macros) and a 20-bit virtual page number in u32_188 — regenerate rather than hand-edit. */
+/*
+VCE4 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_WOFF (195U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_195 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE4 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_WOFF (194U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_WOFF (195U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_195 = (((_ft_).u32_195 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_194 >> (20)) | ((IMG_UINT64)((_ft_).u32_195 & 0x0000ffffU ) << (12)))
+/*
+VCE4 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_WOFF (193U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_WOFF (194U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_194 = (((_ft_).u32_194 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_193 >> (24)) | ((IMG_UINT64)((_ft_).u32_194 & 0x000fffffU ) << (8)))
+/*
+VCE4 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_WOFF (192U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_WOFF (193U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_193 = (((_ft_).u32_193 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_192 >> (28)) | ((IMG_UINT64)((_ft_).u32_193 & 0x00ffffffU ) << (4)))
+/*
+VCE4 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_WOFF (192U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_192 = (((_ft_).u32_192 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_192 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE4 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_WOFF (188U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_188 = (((_ft_).u32_188 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_188 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE4 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_WOFF (187U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_187 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE4 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_WOFF (186U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_WOFF (187U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_187 = (((_ft_).u32_187 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_186 >> (20)) | ((IMG_UINT64)((_ft_).u32_187 & 0x0000ffffU ) << (12)))
+/*
+VCE4 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_WOFF (185U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_WOFF (186U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_186 = (((_ft_).u32_186 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_185 >> (24)) | ((IMG_UINT64)((_ft_).u32_186 & 0x000fffffU ) << (8)))
+/*
+VCE4 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_WOFF (184U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_WOFF (185U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_185 = (((_ft_).u32_185 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_184 >> (28)) | ((IMG_UINT64)((_ft_).u32_185 & 0x00ffffffU ) << (4)))
+/*
+VCE4 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_WOFF (184U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_184 = (((_ft_).u32_184 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_184 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE4 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_WOFF (180U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_180 = (((_ft_).u32_180 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_180 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE3 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_WOFF (179U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_179 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE3 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_WOFF (178U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_WOFF (179U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_179 = (((_ft_).u32_179 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_178 >> (20)) | ((IMG_UINT64)((_ft_).u32_179 & 0x0000ffffU ) << (12)))
+/*
+VCE3 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_WOFF (177U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_WOFF (178U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_178 = (((_ft_).u32_178 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_177 >> (24)) | ((IMG_UINT64)((_ft_).u32_178 & 0x000fffffU ) << (8)))
+/*
+VCE3 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_WOFF (176U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_WOFF (177U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_177 = (((_ft_).u32_177 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_176 >> (28)) | ((IMG_UINT64)((_ft_).u32_177 & 0x00ffffffU ) << (4)))
+/*
+VCE3 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_WOFF (176U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_176 = (((_ft_).u32_176 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_176 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE3 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_WOFF (172U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_172 = (((_ft_).u32_172 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_172 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE3 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_WOFF (171U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_171 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE3 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_WOFF (170U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_WOFF (171U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_171 = (((_ft_).u32_171 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_170 >> (20)) | ((IMG_UINT64)((_ft_).u32_171 & 0x0000ffffU ) << (12)))
+/*
+VCE3 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_WOFF (169U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_WOFF (170U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_170 = (((_ft_).u32_170 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_169 >> (24)) | ((IMG_UINT64)((_ft_).u32_170 & 0x000fffffU ) << (8)))
+/*
+VCE3 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_WOFF (168U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_WOFF (169U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_169 = (((_ft_).u32_169 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_168 >> (28)) | ((IMG_UINT64)((_ft_).u32_169 & 0x00ffffffU ) << (4)))
+/*
+VCE3 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_WOFF (168U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_168 = (((_ft_).u32_168 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_168 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE3 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_WOFF (164U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_164 = (((_ft_).u32_164 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_164 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE3 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_WOFF (163U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_163 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE3 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_WOFF (162U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_WOFF (163U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_163 = (((_ft_).u32_163 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_162 >> (20)) | ((IMG_UINT64)((_ft_).u32_163 & 0x0000ffffU ) << (12)))
+/*
+VCE3 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_WOFF (161U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_WOFF (162U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_162 = (((_ft_).u32_162 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_161 >> (24)) | ((IMG_UINT64)((_ft_).u32_162 & 0x000fffffU ) << (8)))
+/*
+VCE3 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_WOFF (160U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_WOFF (161U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_161 = (((_ft_).u32_161 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_160 >> (28)) | ((IMG_UINT64)((_ft_).u32_161 & 0x00ffffffU ) << (4)))
+/*
+VCE3 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_WOFF (160U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_160 = (((_ft_).u32_160 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_160 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE3 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_WOFF (156U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_156 = (((_ft_).u32_156 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_156 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE3 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_WOFF (155U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_155 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE3 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_WOFF (154U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_WOFF (155U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_155 = (((_ft_).u32_155 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_154 >> (20)) | ((IMG_UINT64)((_ft_).u32_155 & 0x0000ffffU ) << (12)))
+/*
+VCE3 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_WOFF (153U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_WOFF (154U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_154 = (((_ft_).u32_154 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_153 >> (24)) | ((IMG_UINT64)((_ft_).u32_154 & 0x000fffffU ) << (8)))
+/*
+VCE3 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_WOFF (152U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_WOFF (153U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_153 = (((_ft_).u32_153 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_152 >> (28)) | ((IMG_UINT64)((_ft_).u32_153 & 0x00ffffffU ) << (4)))
+/*
+VCE3 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_WOFF (152U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_152 = (((_ft_).u32_152 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_152 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE3 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_WOFF (148U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_148 = (((_ft_).u32_148 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_148 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE2 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_WOFF (147U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_147 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE2 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_WOFF (146U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_WOFF (147U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_147 = (((_ft_).u32_147 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_146 >> (20)) | ((IMG_UINT64)((_ft_).u32_147 & 0x0000ffffU ) << (12)))
+/*
+VCE2 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_WOFF (145U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_WOFF (146U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_146 = (((_ft_).u32_146 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_145 >> (24)) | ((IMG_UINT64)((_ft_).u32_146 & 0x000fffffU ) << (8)))
+/*
+VCE2 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_WOFF (144U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_WOFF (145U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_145 = (((_ft_).u32_145 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_144 >> (28)) | ((IMG_UINT64)((_ft_).u32_145 & 0x00ffffffU ) << (4)))
+/*
+VCE2 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_WOFF (144U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_144 = (((_ft_).u32_144 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_144 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE2 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_WOFF (140U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_140 = (((_ft_).u32_140 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_140 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE2 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_WOFF (139U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_139 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE2 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_WOFF (138U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_WOFF (139U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_139 = (((_ft_).u32_139 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_138 >> (20)) | ((IMG_UINT64)((_ft_).u32_139 & 0x0000ffffU ) << (12)))
+/*
+VCE2 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_WOFF (137U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_WOFF (138U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_138 = (((_ft_).u32_138 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_137 >> (24)) | ((IMG_UINT64)((_ft_).u32_138 & 0x000fffffU ) << (8)))
+/*
+VCE2 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_WOFF (136U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_WOFF (137U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_137 = (((_ft_).u32_137 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_136 >> (28)) | ((IMG_UINT64)((_ft_).u32_137 & 0x00ffffffU ) << (4)))
+/*
+VCE2 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_WOFF (136U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_136 = (((_ft_).u32_136 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_136 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE2 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_WOFF (132U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_132 = (((_ft_).u32_132 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_132 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE2 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_WOFF (131U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_131 = (((_ft_).u32_131 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_131 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE2 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_WOFF (130U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_WOFF (131U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_130 = (((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_131 = (((_ft_).u32_131 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_130 >> (20)) | ((IMG_UINT64)((_ft_).u32_131 & 0x0000ffffU ) << (12)))
+/*
+VCE2 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_WOFF (129U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_WOFF (130U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_130 = (((_ft_).u32_130 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_129 >> (24)) | ((IMG_UINT64)((_ft_).u32_130 & 0x000fffffU ) << (8)))
+/*
+VCE2 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_WOFF (128U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_WOFF (129U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_129 = (((_ft_).u32_129 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_128 >> (28)) | ((IMG_UINT64)((_ft_).u32_129 & 0x00ffffffU ) << (4)))
+/*
+VCE2 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_WOFF (128U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_128 = (((_ft_).u32_128 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_128 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE2 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_WOFF (124U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_124 = (((_ft_).u32_124 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_124 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE2 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_WOFF (123U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_123 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE2 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_WOFF (122U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_WOFF (123U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_123 = (((_ft_).u32_123 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_122 >> (20)) | ((IMG_UINT64)((_ft_).u32_123 & 0x0000ffffU ) << (12)))
+/*
+VCE2 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_WOFF (121U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_WOFF (122U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_122 = (((_ft_).u32_122 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_121 >> (24)) | ((IMG_UINT64)((_ft_).u32_122 & 0x000fffffU ) << (8)))
+/*
+VCE2 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_WOFF (120U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_WOFF (121U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_121 = (((_ft_).u32_121 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_120 >> (28)) | ((IMG_UINT64)((_ft_).u32_121 & 0x00ffffffU ) << (4)))
+/*
+VCE2 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_WOFF (120U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_120 = (((_ft_).u32_120 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_120 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE2 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_WOFF (116U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_116 = (((_ft_).u32_116 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_116 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE1 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_WOFF (115U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_115 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE1 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_WOFF (114U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_WOFF (115U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_115 = (((_ft_).u32_115 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_114 >> (20)) | ((IMG_UINT64)((_ft_).u32_115 & 0x0000ffffU ) << (12)))
+/*
+VCE1 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_WOFF (113U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_WOFF (114U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_114 = (((_ft_).u32_114 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_113 >> (24)) | ((IMG_UINT64)((_ft_).u32_114 & 0x000fffffU ) << (8)))
+/*
+VCE1 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_WOFF (112U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_WOFF (113U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_113 = (((_ft_).u32_113 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_112 >> (28)) | ((IMG_UINT64)((_ft_).u32_113 & 0x00ffffffU ) << (4)))
+/*
+VCE1 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_WOFF (112U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_112 = (((_ft_).u32_112 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_112 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE1 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_WOFF (108U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_108 = (((_ft_).u32_108 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_108 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE1 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_WOFF (107U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_107 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE1 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_WOFF (106U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_WOFF (107U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_107 = (((_ft_).u32_107 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_106 >> (20)) | ((IMG_UINT64)((_ft_).u32_107 & 0x0000ffffU ) << (12)))
+/*
+VCE1 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_WOFF (105U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_WOFF (106U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_106 = (((_ft_).u32_106 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_105 >> (24)) | ((IMG_UINT64)((_ft_).u32_106 & 0x000fffffU ) << (8)))
+/*
+VCE1 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_WOFF (104U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_WOFF (105U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_105 = (((_ft_).u32_105 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_104 >> (28)) | ((IMG_UINT64)((_ft_).u32_105 & 0x00ffffffU ) << (4)))
+/*
+VCE1 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_WOFF (104U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_104 = (((_ft_).u32_104 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_104 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE1 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_WOFF (100U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_100 = (((_ft_).u32_100 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_100 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE1 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_WOFF (99U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_99 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE1 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_WOFF (98U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_WOFF (99U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_99 = (((_ft_).u32_99 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_98 >> (20)) | ((IMG_UINT64)((_ft_).u32_99 & 0x0000ffffU ) << (12)))
+/*
+VCE1 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_WOFF (97U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_WOFF (98U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_98 = (((_ft_).u32_98 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_97 >> (24)) | ((IMG_UINT64)((_ft_).u32_98 & 0x000fffffU ) << (8)))
+/*
+VCE1 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_WOFF (96U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_WOFF (97U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_97 = (((_ft_).u32_97 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_96 >> (28)) | ((IMG_UINT64)((_ft_).u32_97 & 0x00ffffffU ) << (4)))
+/*
+VCE1 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_WOFF (96U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_96 = (((_ft_).u32_96 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_96 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE1 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_WOFF (92U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_92 = (((_ft_).u32_92 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_92 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE1 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_WOFF (91U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_91 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE1 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_WOFF (90U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_WOFF (91U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_91 = (((_ft_).u32_91 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_90 >> (20)) | ((IMG_UINT64)((_ft_).u32_91 & 0x0000ffffU ) << (12)))
+/*
+VCE1 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_WOFF (89U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_WOFF (90U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_90 = (((_ft_).u32_90 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_89 >> (24)) | ((IMG_UINT64)((_ft_).u32_90 & 0x000fffffU ) << (8)))
+/*
+VCE1 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_WOFF (88U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_WOFF (89U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_89 = (((_ft_).u32_89 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_88 >> (28)) | ((IMG_UINT64)((_ft_).u32_89 & 0x00ffffffU ) << (4)))
+/*
+VCE1 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_WOFF (88U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_88 = (((_ft_).u32_88 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_88 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE1 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_WOFF (84U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_84 = (((_ft_).u32_84 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_84 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE0 opened page3 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_WOFF (83U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_UFSTACK(_ft_,_x_) ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_UFSTACK(_ft_) (((_ft_).u32_83 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE0 opened page3 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_WOFF (82U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_WOFF (83U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE3(_ft_,_x_) { ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_83 = (((_ft_).u32_83 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE3(_ft_) (((_ft_).u32_82 >> (20)) | ((IMG_UINT64)((_ft_).u32_83 & 0x0000ffffU ) << (12)))
+/*
+VCE0 opened page3 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_WOFF (81U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_WOFF (82U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE2(_ft_,_x_) { ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_82 = (((_ft_).u32_82 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE2(_ft_) (((_ft_).u32_81 >> (24)) | ((IMG_UINT64)((_ft_).u32_82 & 0x000fffffU ) << (8)))
+/*
+VCE0 opened page3 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_WOFF (80U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_WOFF (81U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE1(_ft_,_x_) { ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_81 = (((_ft_).u32_81 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE1(_ft_) (((_ft_).u32_80 >> (28)) | ((IMG_UINT64)((_ft_).u32_81 & 0x00ffffffU ) << (4)))
+/*
+VCE0 opened page3 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_WOFF (80U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PPAGE0(_ft_,_x_) ((_ft_).u32_80 = (((_ft_).u32_80 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PPAGE0(_ft_) (((_ft_).u32_80 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE0 opened page3 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_WOFF (76U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT3_PAGE(_ft_,_x_) ((_ft_).u32_76 = (((_ft_).u32_76 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT3_PAGE(_ft_) (((_ft_).u32_76 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT3_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE0 opened page2 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_WOFF (75U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_UFSTACK(_ft_,_x_) ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_UFSTACK(_ft_) (((_ft_).u32_75 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE0 opened page2 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_WOFF (74U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_WOFF (75U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE3(_ft_,_x_) { ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_75 = (((_ft_).u32_75 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE3(_ft_) (((_ft_).u32_74 >> (20)) | ((IMG_UINT64)((_ft_).u32_75 & 0x0000ffffU ) << (12)))
+/*
+VCE0 opened page2 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_WOFF (73U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_WOFF (74U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE2(_ft_,_x_) { ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_74 = (((_ft_).u32_74 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE2(_ft_) (((_ft_).u32_73 >> (24)) | ((IMG_UINT64)((_ft_).u32_74 & 0x000fffffU ) << (8)))
+/*
+VCE0 opened page2 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_WOFF (72U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_WOFF (73U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE1(_ft_,_x_) { ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_73 = (((_ft_).u32_73 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE1(_ft_) (((_ft_).u32_72 >> (28)) | ((IMG_UINT64)((_ft_).u32_73 & 0x00ffffffU ) << (4)))
+/*
+VCE0 opened page2 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_WOFF (72U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PPAGE0(_ft_,_x_) ((_ft_).u32_72 = (((_ft_).u32_72 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PPAGE0(_ft_) (((_ft_).u32_72 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE0 opened page2 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_WOFF (68U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT2_PAGE(_ft_,_x_) ((_ft_).u32_68 = (((_ft_).u32_68 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT2_PAGE(_ft_) (((_ft_).u32_68 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT2_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE0 opened page1 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_WOFF (67U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_UFSTACK(_ft_,_x_) ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_UFSTACK(_ft_) (((_ft_).u32_67 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE0 opened page1 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_WOFF (66U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_WOFF (67U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE3(_ft_,_x_) { ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_67 = (((_ft_).u32_67 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE3(_ft_) (((_ft_).u32_66 >> (20)) | ((IMG_UINT64)((_ft_).u32_67 & 0x0000ffffU ) << (12)))
+/*
+VCE0 opened page1 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_WOFF (65U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_WOFF (66U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE2(_ft_,_x_) { ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_66 = (((_ft_).u32_66 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE2(_ft_) (((_ft_).u32_65 >> (24)) | ((IMG_UINT64)((_ft_).u32_66 & 0x000fffffU ) << (8)))
+/*
+VCE0 opened page1 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_WOFF (64U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_WOFF (65U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE1(_ft_,_x_) { ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_65 = (((_ft_).u32_65 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE1(_ft_) (((_ft_).u32_64 >> (28)) | ((IMG_UINT64)((_ft_).u32_65 & 0x00ffffffU ) << (4)))
+/*
+VCE0 opened page1 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_WOFF (64U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PPAGE0(_ft_,_x_) ((_ft_).u32_64 = (((_ft_).u32_64 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PPAGE0(_ft_) (((_ft_).u32_64 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE0 opened page1 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_WOFF (60U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT1_PAGE(_ft_,_x_) ((_ft_).u32_60 = (((_ft_).u32_60 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT1_PAGE(_ft_) (((_ft_).u32_60 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT1_PAGE_SHIFT)) & 0x000fffffU)
+/*
+VCE0 opened page0 struct: unified stack bit
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_WOFF (59U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK (0xFFFEFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_UFSTACK(_ft_,_x_) ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_UFSTACK(_ft_) (((_ft_).u32_59 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_UFSTACK_SHIFT)) & 0x00000001U)
+/*
+VCE0 opened page0 struct: physical page 3
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_WOFF (58U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_WOFF (59U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_SHIFT (20U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK (0x000FFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK (0xFFFF0000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE3(_ft_,_x_) { ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000000000fff))) << 20))); \
+ ((_ft_).u32_59 = (((_ft_).u32_59 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE3_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffff000))) >> 12))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE3(_ft_) (((_ft_).u32_58 >> (20)) | ((IMG_UINT64)((_ft_).u32_59 & 0x0000ffffU ) << (12)))
+/*
+VCE0 opened page0 struct: physical page 2
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_WOFF (57U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_WOFF (58U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE2(_ft_,_x_) { ((_ft_).u32_57 = (((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000000000ff))) << 24))); \
+ ((_ft_).u32_58 = (((_ft_).u32_58 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE2_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffff00))) >> 8))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE2(_ft_) (((_ft_).u32_57 >> (24)) | ((IMG_UINT64)((_ft_).u32_58 & 0x000fffffU ) << (8)))
+/*
+VCE0 opened page0 struct: physical page 1
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_WOFF (56U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_WOFF (57U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_SHIFT (28U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK (0x0FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK (0xFF000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE1(_ft_,_x_) { ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000000000f))) << 28))); \
+ ((_ft_).u32_57 = (((_ft_).u32_57 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE1_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000ffffff0))) >> 4))); }
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE1(_ft_) (((_ft_).u32_56 >> (28)) | ((IMG_UINT64)((_ft_).u32_57 & 0x00ffffffU ) << (4)))
+/*
+VCE0 opened page0 struct: physical page 0
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_WOFF (56U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK (0xF0000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PPAGE0(_ft_,_x_) ((_ft_).u32_56 = (((_ft_).u32_56 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PPAGE0(_ft_) (((_ft_).u32_56 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PPAGE0_SHIFT)) & 0x0fffffffU)
+/*
+VCE0 opened page0 struct: virtual page number
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_WOFF (52U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT0_PAGE(_ft_,_x_) ((_ft_).u32_52 = (((_ft_).u32_52 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT0_PAGE(_ft_) (((_ft_).u32_52 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT0_PAGE_SHIFT)) & 0x000fffffU)
+/*
+Rsv2 area
+*/
+#define PM_DATA_VHEAP_BUFFER_RESV2_W0_WOFF (42U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W1_WOFF (43U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W2_WOFF (44U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W3_WOFF (45U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W4_WOFF (46U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W5_WOFF (47U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W6_WOFF (48U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W7_WOFF (49U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W8_WOFF (50U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W9_WOFF (51U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W0(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & PM_DATA_VHEAP_BUFFER_RESV2_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W0(_ft_) (((_ft_).u32_42 >> (PM_DATA_VHEAP_BUFFER_RESV2_W0_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W1(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & PM_DATA_VHEAP_BUFFER_RESV2_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W1(_ft_) (((_ft_).u32_43 >> (PM_DATA_VHEAP_BUFFER_RESV2_W1_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W2(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & PM_DATA_VHEAP_BUFFER_RESV2_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W2(_ft_) (((_ft_).u32_44 >> (PM_DATA_VHEAP_BUFFER_RESV2_W2_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W3(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & PM_DATA_VHEAP_BUFFER_RESV2_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W3(_ft_) (((_ft_).u32_45 >> (PM_DATA_VHEAP_BUFFER_RESV2_W3_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W4(_ft_,_x_) ((_ft_).u32_46 = (((_ft_).u32_46 & PM_DATA_VHEAP_BUFFER_RESV2_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W4(_ft_) (((_ft_).u32_46 >> (PM_DATA_VHEAP_BUFFER_RESV2_W4_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W5(_ft_,_x_) ((_ft_).u32_47 = (((_ft_).u32_47 & PM_DATA_VHEAP_BUFFER_RESV2_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W5(_ft_) (((_ft_).u32_47 >> (PM_DATA_VHEAP_BUFFER_RESV2_W5_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W6(_ft_,_x_) ((_ft_).u32_48 = (((_ft_).u32_48 & PM_DATA_VHEAP_BUFFER_RESV2_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W6(_ft_) (((_ft_).u32_48 >> (PM_DATA_VHEAP_BUFFER_RESV2_W6_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W7(_ft_,_x_) ((_ft_).u32_49 = (((_ft_).u32_49 & PM_DATA_VHEAP_BUFFER_RESV2_W7_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W7(_ft_) (((_ft_).u32_49 >> (PM_DATA_VHEAP_BUFFER_RESV2_W7_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W8(_ft_,_x_) ((_ft_).u32_50 = (((_ft_).u32_50 & PM_DATA_VHEAP_BUFFER_RESV2_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W8(_ft_) (((_ft_).u32_50 >> (PM_DATA_VHEAP_BUFFER_RESV2_W8_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV2_W9(_ft_,_x_) ((_ft_).u32_51 = (((_ft_).u32_51 & PM_DATA_VHEAP_BUFFER_RESV2_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV2_W9(_ft_) (((_ft_).u32_51 >> (PM_DATA_VHEAP_BUFFER_RESV2_W9_SHIFT)) & 0xffffffffU)
+/*
+Number of pages allocated to TE7 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_WOFF (41U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE7_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE6 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_WOFF (41U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE6_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE5 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_WOFF (41U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT (8U)
+#define PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE5_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE4 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_WOFF (41U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE4_INFLIGHT_CNT(_ft_) (((_ft_).u32_41 >> (PM_DATA_VHEAP_BUFFER_TE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE3 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_WOFF (40U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE3_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE2 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_WOFF (40U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE2_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE1 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_WOFF (40U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT (8U)
+#define PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU)
+#define PM_DATA_VHEAP_BUFFER_SET_TE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE1_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to TE0 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_WOFF (40U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U)
+#define PM_DATA_VHEAP_BUFFER_SET_TE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_TE0_INFLIGHT_CNT(_ft_) (((_ft_).u32_40 >> (PM_DATA_VHEAP_BUFFER_TE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE7 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_WOFF (39U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE7_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE7_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE7_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE6 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_WOFF (39U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE6_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE6_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE6_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE5 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_WOFF (39U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT (8U)
+#define PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE5_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE5_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE5_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE4 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_WOFF (39U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE4_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_39 = (((_ft_).u32_39 & PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE4_INFLIGHT_CNT(_ft_) (((_ft_).u32_39 >> (PM_DATA_VHEAP_BUFFER_VCE4_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE3 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_WOFF (38U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT (24U)
+#define PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK (0x00FFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE3_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE3_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE3_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE2 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_WOFF (38U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT (16U)
+#define PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK (0xFF00FFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE2_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE2_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE2_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE1 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_WOFF (38U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT (8U)
+#define PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK (0xFFFF00FFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE1_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE1_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE1_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+Number of pages allocated to VCE0 but not yet closed
+*/
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_WOFF (38U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK (0xFFFFFF00U)
+#define PM_DATA_VHEAP_BUFFER_SET_VCE0_INFLIGHT_CNT(_ft_,_x_) ((_ft_).u32_38 = (((_ft_).u32_38 & PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_CLRMSK ) | (((_x_) & (0x000000ffU)) << (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VCE0_INFLIGHT_CNT(_ft_) (((_ft_).u32_38 >> (PM_DATA_VHEAP_BUFFER_VCE0_INFLIGHT_CNT_SHIFT)) & 0x000000ffU)
+/*
+1=The PM ran out of memory during processing
+*/
+#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_WOFF (37U)
+#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_PM_OUTOFMEM_R(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_PM_OUTOFMEM_R(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_PM_OUTOFMEM_R_SHIFT)) & 0x00000001U)
+/*
+A copy of rgx_cr_pm_outofmem_abortall (at the point the VHEAP buffer was written)
+*/
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_WOFF (37U)
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT (30U)
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK (0xBFFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_ABORT(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_ABORT(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_OUTOFMEM_ABORT_SHIFT)) & 0x00000001U)
+/*
+When running out of memory, indicates which of the free stacks have run out of memory.
+If bit 2 is set, MMUSTACK has run out of memory
+(NOTE: bit 2 is alternatively documented as reserved - presumably on configurations without an MMU free stack; confirm against the hardware spec for this BVNC).
+If bit 1 is set, UFSTACK has run out of memory.
+If bit 0 is set, FSTACK has run out of memory.
+*/
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_WOFF (37U)
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT (2U)
+#define PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK (0xFFFFFFE3U)
+#define PM_DATA_VHEAP_BUFFER_SET_OUTOFMEM_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_CLRMSK ) | (((_x_) & (0x00000007U)) << (PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_OUTOFMEM_SRC(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_OUTOFMEM_SRC_SHIFT)) & 0x00000007U)
+/*
+When running out of memory, indicates the source of the request that caused the OOM event
+If bit 1 is set, TE caused the OOM.
+If bit 0 is set, VCE caused the OOM.
+*/
+#define PM_DATA_VHEAP_BUFFER_REQ_SRC_WOFF (37U)
+#define PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK (0xFFFFFFFCU)
+#define PM_DATA_VHEAP_BUFFER_SET_REQ_SRC(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & PM_DATA_VHEAP_BUFFER_REQ_SRC_CLRMSK ) | (((_x_) & (0x00000003U)) << (PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_REQ_SRC(_ft_) (((_ft_).u32_37 >> (PM_DATA_VHEAP_BUFFER_REQ_SRC_SHIFT)) & 0x00000003U)
+/*
+MAX RTA index dword in TA stream
+*/
+#define PM_DATA_VHEAP_BUFFER_MAX_RTA_WOFF (36U)
+#define PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_MAX_RTA(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & PM_DATA_VHEAP_BUFFER_MAX_RTA_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_MAX_RTA(_ft_) (((_ft_).u32_36 >> (PM_DATA_VHEAP_BUFFER_MAX_RTA_SHIFT)) & 0xffffffffU)
+/*
+Rsv1 area
+*/
+#define PM_DATA_VHEAP_BUFFER_RESV1_W0_WOFF (20U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W1_WOFF (21U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W2_WOFF (22U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W3_WOFF (23U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W4_WOFF (24U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W5_WOFF (25U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W6_WOFF (26U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W7_WOFF (27U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W8_WOFF (28U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W9_WOFF (29U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W10_WOFF (30U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W11_WOFF (31U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W12_WOFF (32U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W13_WOFF (33U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W14_WOFF (34U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W15_WOFF (35U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W0(_ft_,_x_) ((_ft_).u32_20 = (((_ft_).u32_20 & PM_DATA_VHEAP_BUFFER_RESV1_W0_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W0(_ft_) (((_ft_).u32_20 >> (PM_DATA_VHEAP_BUFFER_RESV1_W0_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W1(_ft_,_x_) ((_ft_).u32_21 = (((_ft_).u32_21 & PM_DATA_VHEAP_BUFFER_RESV1_W1_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W1(_ft_) (((_ft_).u32_21 >> (PM_DATA_VHEAP_BUFFER_RESV1_W1_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W2(_ft_,_x_) ((_ft_).u32_22 = (((_ft_).u32_22 & PM_DATA_VHEAP_BUFFER_RESV1_W2_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W2(_ft_) (((_ft_).u32_22 >> (PM_DATA_VHEAP_BUFFER_RESV1_W2_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W3(_ft_,_x_) ((_ft_).u32_23 = (((_ft_).u32_23 & PM_DATA_VHEAP_BUFFER_RESV1_W3_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W3(_ft_) (((_ft_).u32_23 >> (PM_DATA_VHEAP_BUFFER_RESV1_W3_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W4(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & PM_DATA_VHEAP_BUFFER_RESV1_W4_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W4(_ft_) (((_ft_).u32_24 >> (PM_DATA_VHEAP_BUFFER_RESV1_W4_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W5(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & PM_DATA_VHEAP_BUFFER_RESV1_W5_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W5(_ft_) (((_ft_).u32_25 >> (PM_DATA_VHEAP_BUFFER_RESV1_W5_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W6(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & PM_DATA_VHEAP_BUFFER_RESV1_W6_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W6(_ft_) (((_ft_).u32_26 >> (PM_DATA_VHEAP_BUFFER_RESV1_W6_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W7(_ft_,_x_) ((_ft_).u32_27 = (((_ft_).u32_27 & PM_DATA_VHEAP_BUFFER_RESV1_W7_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W7(_ft_) (((_ft_).u32_27 >> (PM_DATA_VHEAP_BUFFER_RESV1_W7_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W8(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & PM_DATA_VHEAP_BUFFER_RESV1_W8_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W8(_ft_) (((_ft_).u32_28 >> (PM_DATA_VHEAP_BUFFER_RESV1_W8_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W9(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & PM_DATA_VHEAP_BUFFER_RESV1_W9_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W9(_ft_) (((_ft_).u32_29 >> (PM_DATA_VHEAP_BUFFER_RESV1_W9_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W10(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & PM_DATA_VHEAP_BUFFER_RESV1_W10_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W10(_ft_) (((_ft_).u32_30 >> (PM_DATA_VHEAP_BUFFER_RESV1_W10_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W11(_ft_,_x_) ((_ft_).u32_31 = (((_ft_).u32_31 & PM_DATA_VHEAP_BUFFER_RESV1_W11_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W11(_ft_) (((_ft_).u32_31 >> (PM_DATA_VHEAP_BUFFER_RESV1_W11_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W12(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & PM_DATA_VHEAP_BUFFER_RESV1_W12_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W12(_ft_) (((_ft_).u32_32 >> (PM_DATA_VHEAP_BUFFER_RESV1_W12_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W13(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & PM_DATA_VHEAP_BUFFER_RESV1_W13_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W13(_ft_) (((_ft_).u32_33 >> (PM_DATA_VHEAP_BUFFER_RESV1_W13_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W14(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & PM_DATA_VHEAP_BUFFER_RESV1_W14_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W14(_ft_) (((_ft_).u32_34 >> (PM_DATA_VHEAP_BUFFER_RESV1_W14_SHIFT)) & 0xffffffffU)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV1_W15(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & PM_DATA_VHEAP_BUFFER_RESV1_W15_CLRMSK ) | (((_x_) & (0xffffffffU)) << PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT)))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV1_W15(_ft_) (((_ft_).u32_35 >> (PM_DATA_VHEAP_BUFFER_RESV1_W15_SHIFT)) & 0xffffffffU)
+/*
+Rsv0 area
+*/
+#define PM_DATA_VHEAP_BUFFER_RESV0_WOFF (19U)
+#define PM_DATA_VHEAP_BUFFER_RESV0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK (0x00000000U)
+#define PM_DATA_VHEAP_BUFFER_SET_RESV0(_ft_,_x_) ((_ft_).u32_19 = (((_ft_).u32_19 & PM_DATA_VHEAP_BUFFER_RESV0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (PM_DATA_VHEAP_BUFFER_RESV0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_RESV0(_ft_) (((_ft_).u32_19 >> (PM_DATA_VHEAP_BUFFER_RESV0_SHIFT)) & 0xffffffffU)
+/*
+Init Bit Sent Flag for TE7
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_WOFF (18U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7_INIT(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7_INIT(_ft_) (((_ft_).u32_18 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE7_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE7
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_WOFF (18U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE7(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & PM_DATA_VHEAP_BUFFER_VPTR_TE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE7(_ft_) (((_ft_).u32_18 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE7_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE7
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_WOFF (17U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7_INIT(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7_INIT(_ft_) (((_ft_).u32_17 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE7
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_WOFF (17U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE7(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & PM_DATA_VHEAP_BUFFER_VPTR_VCE7_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE7(_ft_) (((_ft_).u32_17 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE7_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE6
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_WOFF (16U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6_INIT(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6_INIT(_ft_) (((_ft_).u32_16 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE6_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE6
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_WOFF (16U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE6(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & PM_DATA_VHEAP_BUFFER_VPTR_TE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE6(_ft_) (((_ft_).u32_16 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE6_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE6
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_WOFF (15U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6_INIT(_ft_) (((_ft_).u32_15 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE6
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_WOFF (15U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE6(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & PM_DATA_VHEAP_BUFFER_VPTR_VCE6_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE6(_ft_) (((_ft_).u32_15 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE6_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE5
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_WOFF (14U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5_INIT(_ft_) (((_ft_).u32_14 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE5_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE5
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_WOFF (14U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE5(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & PM_DATA_VHEAP_BUFFER_VPTR_TE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE5(_ft_) (((_ft_).u32_14 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE5_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE5
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_WOFF (13U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5_INIT(_ft_) (((_ft_).u32_13 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE5
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_WOFF (13U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE5(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & PM_DATA_VHEAP_BUFFER_VPTR_VCE5_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE5(_ft_) (((_ft_).u32_13 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE5_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE4
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_WOFF (12U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4_INIT(_ft_) (((_ft_).u32_12 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE4_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE4
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_WOFF (12U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE4(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & PM_DATA_VHEAP_BUFFER_VPTR_TE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE4(_ft_) (((_ft_).u32_12 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE4_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE4
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_WOFF (11U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4_INIT(_ft_) (((_ft_).u32_11 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE4
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_WOFF (11U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE4(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & PM_DATA_VHEAP_BUFFER_VPTR_VCE4_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE4(_ft_) (((_ft_).u32_11 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE4_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE3
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_WOFF (10U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3_INIT(_ft_) (((_ft_).u32_10 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE3
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_WOFF (10U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & PM_DATA_VHEAP_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE3(_ft_) (((_ft_).u32_10 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE3_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE3
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_WOFF (9U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_9 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE3
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_WOFF (9U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & PM_DATA_VHEAP_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_9 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE3_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE2
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_WOFF (8U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2_INIT(_ft_) (((_ft_).u32_8 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE2
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_WOFF (8U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & PM_DATA_VHEAP_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE2(_ft_) (((_ft_).u32_8 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE2_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE2
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_WOFF (7U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_7 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE2
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_WOFF (7U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & PM_DATA_VHEAP_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_7 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE2_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE1
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_WOFF (6U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1_INIT(_ft_) (((_ft_).u32_6 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE1
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_WOFF (6U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & PM_DATA_VHEAP_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE1(_ft_) (((_ft_).u32_6 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE1_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE1
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_WOFF (5U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1_INIT(_ft_) (((_ft_).u32_5 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE1
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_WOFF (5U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & PM_DATA_VHEAP_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_5 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE1_SHIFT)) & 0x000fffffU)
+/*
+4KB aligned top pointer for MMU
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_WOFF (4U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_MMU(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & PM_DATA_VHEAP_BUFFER_VPTR_MMU_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_MMU(_ft_) (((_ft_).u32_4 >> (PM_DATA_VHEAP_BUFFER_VPTR_MMU_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for ALIST
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_WOFF (3U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST_INIT(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST_INIT(_ft_) (((_ft_).u32_3 >> (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for ALIST
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_WOFF (3U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_ALIST(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & PM_DATA_VHEAP_BUFFER_VPTR_ALIST_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_ALIST(_ft_) (((_ft_).u32_3 >> (PM_DATA_VHEAP_BUFFER_VPTR_ALIST_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE0
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_WOFF (1U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_1 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE0
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_WOFF (1U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & PM_DATA_VHEAP_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_1 >> (PM_DATA_VHEAP_BUFFER_VPTR_TE0_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for VCE0
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_WOFF (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT (31U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_0 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE0
+*/
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_WOFF (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT (0U)
+#define PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK (0xFFF00000U)
+#define PM_DATA_VHEAP_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & PM_DATA_VHEAP_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x000fffffU)) << (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT))))
+#define PM_DATA_VHEAP_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_0 >> (PM_DATA_VHEAP_BUFFER_VPTR_VCE0_SHIFT)) & 0x000fffffU)
+
+
+#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK
+*/
+typedef struct RGX_PM_FREELISTSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+} RGX_PM_FREELISTSTATE_BUFFER;
+
+/*
+NOTE(review): generated-style bitfield accessors. For each field FOO:
+  FOO_WOFF   - index N of the 32-bit word (u32_N) that holds the field,
+  FOO_SHIFT  - bit offset of the field within that word,
+  FOO_CLRMSK - AND-mask that clears the field's bits, preserving the rest.
+SET_FOO() read-modify-writes the word; GET_FOO() shifts and masks it out.
+This block looks machine-generated; fixes likely belong in the generator
+input rather than in this header - TODO confirm.
+*/
+/*
+Reserved field word 2
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) (((_ft_).u32_7 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU)
+/*
+Reserved field word 1
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) (((_ft_).u32_6 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU)
+/*
+Reserved field word 0
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) (((_ft_).u32_5 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU)
+/*
+The number of pages consumed for the MMU Page Table. Must be initialised to zero.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U)
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) (((_ft_).u32_4 >> (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U)
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) (((_ft_).u32_3 >> (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U)
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) (((_ft_).u32_2 >> (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU)
+/*
+Base address of the free stack - points to the bottom of the stack.
+*/
+/*
+64-bit field split across words 0 and 1: value bits 26:0 are stored in
+u32_0[31:5] (bits 4:0 of u32_0 are outside this field and preserved by
+the CLRMSK), value bits 58:27 go to u32_1. The shift-by-5 packing is
+consistent with a 32-byte-granular address - TODO confirm against the
+BASE_ADDR ALIGNSHIFT definition elsewhere in this header.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (5U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (5U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x0000001FU)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); }
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (5)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (27)))
+#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */
+
+
+#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+The PM FreeListState Buffer Layout - this will apply to 3 resources - FSTACK, UFSTACK and MMUSTACK
+*/
+typedef struct RGX_PM_FREELISTSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+} RGX_PM_FREELISTSTATE_BUFFER;
+
+/*
+NOTE(review): non-byte-aligned variant of the FreeListState layout.
+Identical to the byte-aligned variant except for BASE_ADDR, which here
+is stored as a plain 64-bit value (low word in u32_0, high word in
+u32_1, no packing shift). Accessor naming: FOO_WOFF = word index of
+u32_N, FOO_SHIFT = bit offset, FOO_CLRMSK = AND-mask clearing the
+field; SET_FOO read-modify-writes, GET_FOO extracts. Appears
+machine-generated - prefer fixing the generator input.
+*/
+/*
+Reserved field word 2
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_WOFF (7U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_2(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_2(_ft_) (((_ft_).u32_7 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_2_SHIFT)) & 0xffffffffU)
+/*
+Reserved field word 1
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_WOFF (6U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_1(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_1(_ft_) (((_ft_).u32_6 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_1_SHIFT)) & 0xffffffffU)
+/*
+Reserved field word 0
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_WOFF (5U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_RSV_STUFF32_0(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_RSV_STUFF32_0(_ft_) (((_ft_).u32_5 >> (RGX_PM_FREELISTSTATE_BUFFER_RSV_STUFF32_0_SHIFT)) & 0xffffffffU)
+/*
+The number of pages consumed for the MMU Page Table. Must be initialised to zero.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_WOFF (4U)
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_MMUPAGE_STATUS(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_MMUPAGE_STATUS(_ft_) (((_ft_).u32_4 >> (RGX_PM_FREELISTSTATE_BUFFER_MMUPAGE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+The total number of pages consumed from the free stack. Must be initialised to zero. This field is unused in the MMUSTACK.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_WOFF (3U)
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_PAGE_STATUS(_ft_,_x_) ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_PAGE_STATUS(_ft_) (((_ft_).u32_3 >> (RGX_PM_FREELISTSTATE_BUFFER_PAGE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+Stack pointer for the free stack - the location of the next free page relative to the BaseAddr, in number of DWORDs. Must be initialised to zero.
+*/
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_WOFF (2U)
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_STACK_PTR(_ft_,_x_) ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT))))
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_STACK_PTR(_ft_) (((_ft_).u32_2 >> (RGX_PM_FREELISTSTATE_BUFFER_STACK_PTR_SHIFT)) & 0xffffffffU)
+/*
+Base address of the free stack - points to the bottom of the stack.
+*/
+/* Plain 64-bit split: x[31:0] -> u32_0, x[63:32] -> u32_1. */
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_WOFF (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_WOFF (1U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_SHIFT (0U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_FREELISTSTATE_BUFFER_SET_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_FREELISTSTATE_BUFFER_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_FREELISTSTATE_BUFFER_GET_BASE_ADDR(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32)))
+#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */
+
+
+#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+256-bit granular, lower bits ignored.
+Maximum addressable range supported by hardware is 1 TB.
+*/
+/* ALIGNSIZE = 1 << ALIGNSHIFT = 32 bytes (256 bits), matching the
+ * granularity stated above. LOWER/UPPER bound the raw field value;
+ * UPPER is 68719476735 = 2^36 - 1. */
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (5U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (32U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */
+
+
+#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit aligned.
+Maximum addressable range supported by hardware is 1 TB.
+The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs.
+*/
+/* ALIGNSIZE = 1 << ALIGNSHIFT = 16 bytes (128 bits). UPPER is again
+ * 2^36 - 1, i.e. the same raw field range as the byte-aligned variant. */
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR (0U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_LOWER (0U)
+#define RGX_PM_FREELISTSTATE_BASE_ADDR_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */
+
+
+/*
+Maximum range supported by hardware is 23 bits.
+*/
+#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR (0U)
+#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_LOWER (0U)
+#define RGX_PM_FREELISTSTATE_STACK_PTR_STACK_PTR_UPPER (16777215U)
+/*
+NOTE(review): _UPPER here (and for PAGE_STATUS / MMUPAGE_STATUS below)
+is 16777215 = 2^24 - 1, a 24-bit value, which appears to disagree with
+the "23 bits" wording in the comments - confirm against the hardware
+spec / generator input before relying on either figure.
+*/
+
+
+/*
+Maximum range supported by hardware is 23 bits.
+*/
+#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS (0U)
+#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_LOWER (0U)
+#define RGX_PM_FREELISTSTATE_PAGE_STATUS_PAGE_STATUS_UPPER (16777215U)
+
+
+/*
+Maximum range supported by hardware is 23 bits.
+*/
+#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS (0U)
+#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_LOWER (0U)
+#define RGX_PM_FREELISTSTATE_MMUPAGE_STATUS_MMUPAGE_STATUS_UPPER (16777215U)
+
+
+#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)
+/*
+The PM Render Context Buffer Layout
+*/
+typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+ IMG_UINT32 u32_8;
+ IMG_UINT32 u32_9;
+ IMG_UINT32 u32_10;
+ IMG_UINT32 u32_11;
+ IMG_UINT32 u32_12;
+ IMG_UINT32 u32_13;
+ IMG_UINT32 u32_14;
+ IMG_UINT32 u32_15;
+ IMG_UINT32 u32_16;
+ IMG_UINT32 u32_17;
+ IMG_UINT32 u32_18;
+ IMG_UINT32 u32_19;
+ IMG_UINT32 u32_20;
+ IMG_UINT32 u32_21;
+ IMG_UINT32 u32_22;
+ IMG_UINT32 u32_23;
+ IMG_UINT32 u32_24;
+ IMG_UINT32 u32_25;
+ IMG_UINT32 u32_26;
+ IMG_UINT32 u32_27;
+ IMG_UINT32 u32_28;
+ IMG_UINT32 u32_29;
+ IMG_UINT32 u32_30;
+ IMG_UINT32 u32_31;
+ IMG_UINT32 u32_32;
+ IMG_UINT32 u32_33;
+ IMG_UINT32 u32_34;
+ IMG_UINT32 u32_35;
+ IMG_UINT32 u32_36;
+ IMG_UINT32 u32_37;
+ IMG_UINT32 u32_38;
+ IMG_UINT32 u32_39;
+ IMG_UINT32 u32_40;
+ IMG_UINT32 u32_41;
+ IMG_UINT32 u32_42;
+ IMG_UINT32 u32_43;
+ IMG_UINT32 u32_44;
+ IMG_UINT32 u32_45;
+} RGX_PM_RENDERSTATE_BUFFER;
+
+/*
+NOTE(review): byte-aligned variant of the render-state layout. Accessor
+convention: FOO_WOFF = index of the u32_N word holding the field,
+FOO_SHIFT = bit offset, FOO_CLRMSK = AND-mask clearing the field bits;
+SET_FOO read-modify-writes, GET_FOO extracts. Two-word base addresses
+are packed with a shift (4 for VFP/VHEAP, 5 for MLIST), so the stored
+value's low bits occupy the upper bits of the even word and the
+remainder spills into the odd word. Appears machine-generated - fixes
+likely belong in the generator input.
+*/
+/*
+The base address of the Virtual-Physical Page Translation Table.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x0000000FU)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \
+ ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (4)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (28)))
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP.
+A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+Must be initialised to zero.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the PM.
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+Must be initialised to zero.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU)
+/*
+The base address of the VHEAP buffer.
+Must be initialised to point to the location of the VHEAP buffer in memory.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x0000000FU)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x000000000fffffff))) << 4))); \
+ ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0ffffffff0000000))) >> 28))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (4)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (28)))
+/*
+Reserved bits, un-used.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU)
+/*
+The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU)
+/*
+The base address of the MLIST.
+Must be initialised to point to a block of memory where the PM can write the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \
+ ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27)))
+/*
+The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32)))
+#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */
+
+
+#if defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)
+/*
+The PM Render Context Buffer Layout
+*/
+typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+ IMG_UINT32 u32_8;
+ IMG_UINT32 u32_9;
+ IMG_UINT32 u32_10;
+ IMG_UINT32 u32_11;
+} RGX_PM_RENDERSTATE_BUFFER;
+
+/*
+NOTE(review): non-byte-aligned variant of the render-state layout.
+Same word assignments as the byte-aligned variant for the fields kept
+here, but every two-word base address (VFP, VHEAP, MLIST) is stored as
+a plain 64-bit split - x[31:0] in the even word, x[63:32] in the odd
+word - with no packing shift. Accessor convention: FOO_WOFF = u32_N
+word index, FOO_SHIFT = bit offset, FOO_CLRMSK = AND-mask clearing the
+field; SET_FOO read-modify-writes, GET_FOO extracts. Appears
+machine-generated - fixes likely belong in the generator input.
+*/
+/*
+The base address of the Virtual-Physical Page Translation Table.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VFP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VFP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VFP_BASE_ADDR(_ft_) (((_ft_).u32_10 >> (0)) | ((IMG_UINT64)((_ft_).u32_11 & 0xffffffffU ) << (32)))
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP.
+A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+Must be initialised to zero.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the PM.
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+Must be initialised to zero.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU)
+/*
+The base address of the VHEAP buffer.
+Must be initialised to point to the location of the VHEAP buffer in memory.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_WOFF (6U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_WOFF (7U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VHEAP_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_VHEAP_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VHEAP_BASE_ADDR(_ft_) (((_ft_).u32_6 >> (0)) | ((IMG_UINT64)((_ft_).u32_7 & 0xffffffffU ) << (32)))
+/*
+Reserved bits, un-used.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU)
+/*
+The number of entries on the MLIST. Must be initialised to zero, meaning no pages allocated.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU)
+/*
+The base address of the MLIST.
+Must be initialised to point to a block of memory where the PM can write the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (0)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (32)))
+/*
+The number of entries on the ALIST. Must be initialised to zero, meaning no pages allocated.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0xffffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0xffffffffU ) << (32)))
+#endif /* RGX_FEATURE_PM_REGISTER_CONFIG_MODE&&!PM_BYTE_ALIGNED_BASE_ADDRESSES&&!SINGLE_TE_VSPACE */
+
+
+#if defined(RGX_FEATURE_SINGLE_TE_VSPACE)
+/*
+The PM Render Context Buffer Layout
+*/
+typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+ IMG_UINT32 u32_8;
+ IMG_UINT32 u32_9;
+ IMG_UINT32 u32_10;
+ IMG_UINT32 u32_11;
+ IMG_UINT32 u32_12;
+ IMG_UINT32 u32_13;
+ IMG_UINT32 u32_14;
+ IMG_UINT32 u32_15;
+ IMG_UINT32 u32_16;
+ IMG_UINT32 u32_17;
+ IMG_UINT32 u32_18;
+ IMG_UINT32 u32_19;
+ IMG_UINT32 u32_20;
+ IMG_UINT32 u32_21;
+ IMG_UINT32 u32_22;
+ IMG_UINT32 u32_23;
+ IMG_UINT32 u32_24;
+ IMG_UINT32 u32_25;
+ IMG_UINT32 u32_26;
+ IMG_UINT32 u32_27;
+ IMG_UINT32 u32_28;
+ IMG_UINT32 u32_29;
+ IMG_UINT32 u32_30;
+ IMG_UINT32 u32_31;
+ IMG_UINT32 u32_32;
+ IMG_UINT32 u32_33;
+ IMG_UINT32 u32_34;
+ IMG_UINT32 u32_35;
+ IMG_UINT32 u32_36;
+ IMG_UINT32 u32_37;
+ IMG_UINT32 u32_38;
+ IMG_UINT32 u32_39;
+ IMG_UINT32 u32_40;
+ IMG_UINT32 u32_41;
+ IMG_UINT32 u32_42;
+ IMG_UINT32 u32_43;
+ IMG_UINT32 u32_44;
+ IMG_UINT32 u32_45;
+} RGX_PM_RENDERSTATE_BUFFER;
+
+/*
+MMU catalogue base address for VCE pipe 3 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_WOFF (37U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_37 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 3 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_WOFF (36U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 3 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_WOFF (36U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 2 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_WOFF (35U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_35 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 2 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_WOFF (34U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 2 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_WOFF (34U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 1 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_WOFF (33U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_33 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 1 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_WOFF (32U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 1 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_WOFF (32U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 0 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_WOFF (30U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_30 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 0 ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_WOFF (29U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_) (((_ft_).u32_29 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for VCE pipe 0 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_WOFF (28U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 0 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_WOFF (28U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 0 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_WOFF (26U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_26 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 0 ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_WOFF (25U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_) (((_ft_).u32_25 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for TE pipe 0 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_WOFF (24U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for TE pipe 0 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_WOFF (24U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for ALIST LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_WOFF (18U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_) (((_ft_).u32_18 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for ALIST ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_WOFF (17U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_ADDR(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_ADDR(_ft_) (((_ft_).u32_17 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for ALIST MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_WOFF (16U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_MAPPED(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_MAPPED(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for ALIST INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_WOFF (16U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_WOFF (12U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_WOFF (12U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1_INIT(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)) & 0x7fffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP.
+A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (7U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_7 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the PM.
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (6U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_6 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU)
+/*
+Reserved bits, un-used.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU)
+/*
+The number of entries on the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU)
+/*
+The base address of the MLIST.
+Must be initialised to point to a block of memory where the PM can write the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \
+ ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27)))
+/*
+Init bit sent flag for ALIST
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_INIT(_ft_) (((_ft_).u32_1 >> (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)) & 0x00000001U)
+/*
+The number of entries on the ALIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32)))
+#endif /* RGX_FEATURE_SINGLE_TE_VSPACE */
+
+
+#if !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE))
+/*
+The PM Render Context Buffer Layout
+*/
+typedef struct RGX_PM_RENDERSTATE_BUFFER_TAG {
+ IMG_UINT32 u32_0;
+ IMG_UINT32 u32_1;
+ IMG_UINT32 u32_2;
+ IMG_UINT32 u32_3;
+ IMG_UINT32 u32_4;
+ IMG_UINT32 u32_5;
+ IMG_UINT32 u32_6;
+ IMG_UINT32 u32_7;
+ IMG_UINT32 u32_8;
+ IMG_UINT32 u32_9;
+ IMG_UINT32 u32_10;
+ IMG_UINT32 u32_11;
+ IMG_UINT32 u32_12;
+ IMG_UINT32 u32_13;
+ IMG_UINT32 u32_14;
+ IMG_UINT32 u32_15;
+ IMG_UINT32 u32_16;
+ IMG_UINT32 u32_17;
+ IMG_UINT32 u32_18;
+ IMG_UINT32 u32_19;
+ IMG_UINT32 u32_20;
+ IMG_UINT32 u32_21;
+ IMG_UINT32 u32_22;
+ IMG_UINT32 u32_23;
+ IMG_UINT32 u32_24;
+ IMG_UINT32 u32_25;
+ IMG_UINT32 u32_26;
+ IMG_UINT32 u32_27;
+ IMG_UINT32 u32_28;
+ IMG_UINT32 u32_29;
+ IMG_UINT32 u32_30;
+ IMG_UINT32 u32_31;
+ IMG_UINT32 u32_32;
+ IMG_UINT32 u32_33;
+ IMG_UINT32 u32_34;
+ IMG_UINT32 u32_35;
+ IMG_UINT32 u32_36;
+ IMG_UINT32 u32_37;
+ IMG_UINT32 u32_38;
+ IMG_UINT32 u32_39;
+ IMG_UINT32 u32_40;
+ IMG_UINT32 u32_41;
+ IMG_UINT32 u32_42;
+ IMG_UINT32 u32_43;
+ IMG_UINT32 u32_44;
+ IMG_UINT32 u32_45;
+} RGX_PM_RENDERSTATE_BUFFER;
+
+/*
+MMU catalogue base address for VCE pipe 3 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_WOFF (45U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_45 = (((_ft_).u32_45 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_45 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 3 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_WOFF (44U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_MAPPED(_ft_) (((_ft_).u32_44 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 3 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_WOFF (44U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_44 = (((_ft_).u32_44 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_44 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 2 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_WOFF (43U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_43 = (((_ft_).u32_43 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_43 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 2 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_WOFF (42U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_MAPPED(_ft_) (((_ft_).u32_42 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 2 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_WOFF (42U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_42 = (((_ft_).u32_42 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_42 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 1 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_WOFF (41U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_41 = (((_ft_).u32_41 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_41 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 1 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_WOFF (40U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_MAPPED(_ft_) (((_ft_).u32_40 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 1 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_WOFF (40U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_40 = (((_ft_).u32_40 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_40 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 3 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_WOFF (37U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_,_x_) ((_ft_).u32_37 = (((_ft_).u32_37 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE(_ft_) (((_ft_).u32_37 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 3 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_WOFF (36U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_MAPPED(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for TE pipe 3 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_WOFF (36U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_,_x_) ((_ft_).u32_36 = (((_ft_).u32_36 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE(_ft_) (((_ft_).u32_36 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE3_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 2 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_WOFF (35U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_,_x_) ((_ft_).u32_35 = (((_ft_).u32_35 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE(_ft_) (((_ft_).u32_35 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 2 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_WOFF (34U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_MAPPED(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for TE pipe 2 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_WOFF (34U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_,_x_) ((_ft_).u32_34 = (((_ft_).u32_34 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE(_ft_) (((_ft_).u32_34 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE2_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 1 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_WOFF (33U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_,_x_) ((_ft_).u32_33 = (((_ft_).u32_33 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE(_ft_) (((_ft_).u32_33 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 1 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_WOFF (32U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_MAPPED(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for TE pipe 1 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_WOFF (32U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_,_x_) ((_ft_).u32_32 = (((_ft_).u32_32 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE(_ft_) (((_ft_).u32_32 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE1_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 0 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_WOFF (30U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_30 = (((_ft_).u32_30 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_30 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for VCE pipe 0 ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_WOFF (29U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_29 = (((_ft_).u32_29 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_ADDR(_ft_) (((_ft_).u32_29 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for VCE pipe 0 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_WOFF (28U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_MAPPED(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for VCE pipe 0 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_WOFF (28U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_28 = (((_ft_).u32_28 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_28 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_VCE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 0 LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_WOFF (26U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_,_x_) ((_ft_).u32_26 = (((_ft_).u32_26 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE(_ft_) (((_ft_).u32_26 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for TE pipe 0 ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_WOFF (25U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_,_x_) ((_ft_).u32_25 = (((_ft_).u32_25 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_ADDR(_ft_) (((_ft_).u32_25 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for TE pipe 0 MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_WOFF (24U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_MAPPED(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for TE pipe 0 INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_WOFF (24U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_,_x_) ((_ft_).u32_24 = (((_ft_).u32_24 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE(_ft_) (((_ft_).u32_24 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_TE_PIPE0_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for ALIST LAST_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_WOFF (18U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_,_x_) ((_ft_).u32_18 = (((_ft_).u32_18 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_LAST_PAGE(_ft_) (((_ft_).u32_18 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_LAST_PAGE_SHIFT)) & 0x000fffffU)
+/*
+MMU catalogue base address for ALIST ADDR
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_WOFF (17U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK (0xF0000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_ADDR(_ft_,_x_) ((_ft_).u32_17 = (((_ft_).u32_17 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_CLRMSK ) | (((_x_) & (0x0fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_ADDR(_ft_) (((_ft_).u32_17 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_ADDR_SHIFT)) & 0x0fffffffU)
+/*
+MMU catalogue base address for ALIST MAPPED
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_WOFF (16U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_MAPPED(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_MAPPED(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_MAPPED_SHIFT)) & 0x00000001U)
+/*
+MMU catalogue base address for ALIST INIT_PAGE
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_WOFF (16U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK (0xFFF00000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_,_x_) ((_ft_).u32_16 = (((_ft_).u32_16 & RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_CLRMSK ) | (((_x_) & (0x000fffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MMU_CAT_BASE_ALIST_INIT_PAGE(_ft_) (((_ft_).u32_16 >> (RGX_PM_RENDERSTATE_BUFFER_MMU_CAT_BASE_ALIST_INIT_PAGE_SHIFT)) & 0x000fffffU)
+/*
+Init Bit Sent Flag for TE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_WOFF (15U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3_INIT(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3_INIT(_ft_) (((_ft_).u32_15 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_WOFF (15U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE3(_ft_,_x_) ((_ft_).u32_15 = (((_ft_).u32_15 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE3(_ft_) (((_ft_).u32_15 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE3_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_WOFF (14U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3_INIT(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3_INIT(_ft_) (((_ft_).u32_14 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE3
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_WOFF (14U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE3(_ft_,_x_) ((_ft_).u32_14 = (((_ft_).u32_14 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE3(_ft_) (((_ft_).u32_14 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE3_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for TE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_WOFF (13U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2_INIT(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2_INIT(_ft_) (((_ft_).u32_13 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_WOFF (13U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE2(_ft_,_x_) ((_ft_).u32_13 = (((_ft_).u32_13 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE2(_ft_) (((_ft_).u32_13 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE2_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_WOFF (12U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2_INIT(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2_INIT(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE2
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_WOFF (12U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE2(_ft_,_x_) ((_ft_).u32_12 = (((_ft_).u32_12 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE2(_ft_) (((_ft_).u32_12 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE2_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for TE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1_INIT(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1_INIT(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_WOFF (11U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE1(_ft_,_x_) ((_ft_).u32_11 = (((_ft_).u32_11 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE1(_ft_) (((_ft_).u32_11 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE1_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1_INIT(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1_INIT(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE1
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_WOFF (10U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE1(_ft_,_x_) ((_ft_).u32_10 = (((_ft_).u32_10 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE1(_ft_) (((_ft_).u32_10 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE1_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for TE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0_INIT(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0_INIT(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for TE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_WOFF (9U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_TE0(_ft_,_x_) ((_ft_).u32_9 = (((_ft_).u32_9 & RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_TE0(_ft_) (((_ft_).u32_9 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_TE0_SHIFT)) & 0x7fffffffU)
+/*
+Init Bit Sent Flag for VCE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0_INIT(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0_INIT(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_INIT_SHIFT)) & 0x00000001U)
+/*
+16KB aligned virtual top pointer for VCE0
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_WOFF (8U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_VPTR_VCE0(_ft_,_x_) ((_ft_).u32_8 = (((_ft_).u32_8 & RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_CLRMSK ) | (((_x_) & (0x7fffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_VPTR_VCE0(_ft_) (((_ft_).u32_8 >> (RGX_PM_RENDERSTATE_BUFFER_VPTR_VCE0_SHIFT)) & 0x7fffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the ISP.
+A '1' in a bit position indicates that the ISP has signalled that the corresponding macrotile can be freed.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_WOFF (7U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MTILEFREE_STATUS(_ft_,_x_) ((_ft_).u32_7 = (((_ft_).u32_7 & RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MTILEFREE_STATUS(_ft_) (((_ft_).u32_7 >> (RGX_PM_RENDERSTATE_BUFFER_MTILEFREE_STATUS_SHIFT)) & 0xffffffffU)
+/*
+A 16-bit macrotile mask indicating which macrotiles have been freed by the PM.
+A '1' in a bit position indicates that the corresponding macrotile has been freed, and its pages released back to the appropriate free stack.
+Only the least-significant 16 bits are valid.
+Only used in the 3D phase.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_WOFF (6U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_DEALLOC_MASK_STATUS(_ft_,_x_) ((_ft_).u32_6 = (((_ft_).u32_6 & RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_DEALLOC_MASK_STATUS(_ft_) (((_ft_).u32_6 >> (RGX_PM_RENDERSTATE_BUFFER_DEALLOC_MASK_STATUS_SHIFT)) & 0xffffffffU)
+/*
+Reserved bits, un-used.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_WOFF (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_RSV_STUFF32(_ft_,_x_) ((_ft_).u32_5 = (((_ft_).u32_5 & RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_RSV_STUFF32(_ft_) (((_ft_).u32_5 >> (RGX_PM_RENDERSTATE_BUFFER_RSV_STUFF32_SHIFT)) & 0xffffffffU)
+/*
+The number of entries on the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_WOFF (4U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_TAIL(_ft_,_x_) ((_ft_).u32_4 = (((_ft_).u32_4 & RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_CLRMSK ) | (((_x_) & (0xffffffffU)) << (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_TAIL(_ft_) (((_ft_).u32_4 >> (RGX_PM_RENDERSTATE_BUFFER_MLIST_TAIL_SHIFT)) & 0xffffffffU)
+/*
+The base address of the MLIST.
+Must be initialised to point to a block of memory where the PM can write the MLIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_WOFF (2U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_WOFF (3U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_SHIFT (5U)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK (0x0000001FU)
+#define RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_MLIST_BASE_ADDR(_ft_,_x_) { ((_ft_).u32_2 = (((_ft_).u32_2 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x0000000007ffffff))) << 5))); \
+ ((_ft_).u32_3 = (((_ft_).u32_3 & RGX_PM_RENDERSTATE_BUFFER_MLIST_BASE_ADDR_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x07fffffff8000000))) >> 27))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_MLIST_BASE_ADDR(_ft_) (((_ft_).u32_2 >> (5)) | ((IMG_UINT64)((_ft_).u32_3 & 0xffffffffU ) << (27)))
+/*
+Init bit sent flag for ALIST
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT (31U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK (0x7FFFFFFFU)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_INIT(_ft_,_x_) ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_CLRMSK ) | (((_x_) & (0x00000001U)) << (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT))))
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_INIT(_ft_) (((_ft_).u32_1 >> (RGX_PM_RENDERSTATE_BUFFER_ALIST_INIT_SHIFT)) & 0x00000001U)
+/*
+The number of entries on the ALIST.
+*/
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_WOFF (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_WOFF (1U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_SHIFT (0U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK (0x00000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK (0x80000000U)
+#define RGX_PM_RENDERSTATE_BUFFER_SET_ALIST_TAIL(_ft_,_x_) { ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W0_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x00000000ffffffff))) << 0))); \
+ ((_ft_).u32_1 = (((_ft_).u32_1 & RGX_PM_RENDERSTATE_BUFFER_ALIST_TAIL_W1_CLRMSK ) | (((_x_) & (IMG_UINT64_C(0x7fffffff00000000))) >> 32))); }
+#define RGX_PM_RENDERSTATE_BUFFER_GET_ALIST_TAIL(_ft_) (((_ft_).u32_0 >> (0)) | ((IMG_UINT64)((_ft_).u32_1 & 0x7fffffffU ) << (32)))
+#endif /* !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_PM_REGISTER_CONFIG_MODE) && !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) && !defined(RGX_FEATURE_SINGLE_TE_VSPACE)) && !(defined(RGX_FEATURE_SINGLE_TE_VSPACE)) */
+
+
+/*
+Maximum range supported by hardware is 33 bits.
+*/
+#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL (0U)
+#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_LOWER (0U)
+#define RGX_PM_RENDERSTATE_ALIST_TAIL_ALIST_TAIL_UPPER (8589934591ULL)
+
+
+#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+256-bit granular, lower bits ignored.
+Maximum addressable range supported by hardware is 1 TB.
+*/
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (5U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (32U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */
+
+
+#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit aligned.
+Maximum addressable range supported by hardware is 1 TB.
+The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs.
+*/
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_MLIST_BASE_ADDR_MLIST_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */
+
+
+/*
+Maximum range supported by hardware is 33 bits.
+*/
+#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL (0U)
+#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_LOWER (0U)
+#define RGX_PM_RENDERSTATE_MLIST_TAIL_MLIST_TAIL_UPPER (8589934591ULL)
+
+
+#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit granular, lower bits ignored.
+Maximum addressable range supported by hardware is 1 TB.
+*/
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */
+
+
+#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit aligned.
+Maximum addressable range supported by hardware is 1 TB.
+The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs.
+*/
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_VHEAP_BASE_ADDR_VHEAP_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */
+
+
+/*
+Only the 16 least-significant bits are used
+*/
+#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS (0U)
+#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_LOWER (0U)
+#define RGX_PM_RENDERSTATE_DEALLOC_MASK_STATUS_DEALLOC_MASK_STATUS_UPPER (65535U)
+
+
+/*
+Only the 16 least-significant bits are used
+*/
+#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS (0U)
+#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_LOWER (0U)
+#define RGX_PM_RENDERSTATE_MTILEFREE_STATUS_MTILEFREE_STATUS_UPPER (65535U)
+
+
+#if defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit granular, lower bits ignored.
+Maximum addressable range supported by hardware is 1 TB.
+*/
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES */
+
+
+#if !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES)
+/*
+128-bit aligned.
+Maximum addressable range supported by hardware is 1 TB.
+The 40-bit, 16-byte-aligned address is packed into bits 35:0 of the two DWORDs.
+*/
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSHIFT (4U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_ALIGNSIZE (16U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR (0U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_LOWER (0U)
+#define RGX_PM_RENDERSTATE_VFP_BASE_ADDR_VFP_BASE_ADDR_UPPER (68719476735ULL)
+#endif /* !defined(RGX_FEATURE_PM_BYTE_ALIGNED_BASE_ADDRESSES) */
+
+
+#endif /* RGXPMDEFS_H */
+/*****************************************************************************
+ End of file (rgxpmdefs.h)
+*****************************************************************************/
*/
#define DEVMEM_ANNOTATION_MAX_LEN ((IMG_UINT32)PVR_ANNOTATION_MAX_LEN + 1U)
+
+/* Reserved VA space of a heap must always be a multiple of
+ * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY. The granularity has been chosen to
+ * support the maximum OS page size anticipated in practice.
+ */
+#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */
+
#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
/*!
Pointer to a linked list node
*/
-typedef struct DLLIST_NODE_ *PDLLIST_NODE;
+typedef struct DLLIST_NODE_TAG *PDLLIST_NODE;
/*!
* used in a structure shared between host and device.
* Consider such clients if any changes are made to this structure.
*/
-typedef struct DLLIST_NODE_
+typedef struct DLLIST_NODE_TAG
{
- struct DLLIST_NODE_ *psPrevNode;
- struct DLLIST_NODE_ *psNextNode;
+ struct DLLIST_NODE_TAG *psPrevNode;
+ struct DLLIST_NODE_TAG *psNextNode;
} DLLIST_NODE;
*/
/*****************************************************************************/
#define dllist_foreach(list_head) \
- for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode; \
- _DllNode != &(list_head); \
- _DllNode = _DllNext, _DllNext = _DllNode->psNextNode)
+ for (DLLIST_NODE *DllCurNode = (list_head).psNextNode, *DllNextNode = DllCurNode->psNextNode; \
+ DllCurNode != &(list_head); \
+ DllCurNode = DllNextNode, DllNextNode = DllCurNode->psNextNode)
#define dllist_foreach_backwards(list_head) \
- for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode; \
- _DllNode != &(list_head); \
- _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode)
+ for (DLLIST_NODE *DllCurNode = (list_head).psPrevNode, *DllPrevNode = DllCurNode->psPrevNode; \
+ DllCurNode != &(list_head); \
+ DllCurNode = DllPrevNode, DllPrevNode = DllCurNode->psPrevNode)
-#define dllist_cur(type, member) IMG_CONTAINER_OF(_DllNode, type, member)
+#define dllist_cur(type, member) IMG_CONTAINER_OF(DllCurNode, type, member)
/*************************************************************************/ /*!
@Function dllist_init
*/
typedef enum _IMG_ADDRESSMODE_
{
+ IMG_ADDRESSMODE_DONTCARE,
IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */
IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
IMG_ADDRESSMODE_CLAMPBORDER,
IMG_ADDRESSMODE_OGL_CLAMP,
IMG_ADDRESSMODE_OVG_TILEFILL,
- IMG_ADDRESSMODE_DONTCARE,
} IMG_ADDRESSMODE;
/**
#define GCC_VERSION_AT_LEAST(major, minor) 0
#endif
+#if defined(__clang__)
+#define CLANG_VERSION_AT_LEAST(major) \
+ (__clang_major__ >= (major))
+#else
+#define CLANG_VERSION_AT_LEAST(major) 0
+#endif
+
/* Use Clang's __has_extension and __has_builtin macros if available. */
#if defined(__has_extension)
#define has_clang_extension(e) __has_extension(e)
#define IMG_INTERNAL
#define IMG_EXPORT
#define IMG_CALLCONV
- #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv)
+ #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) || defined(__APPLE__)
#define IMG_CALLCONV
#define C_CALLCONV
#include <linux/compiler.h>
#if !defined(__fallthrough)
- #if GCC_VERSION_AT_LEAST(7, 0)
+ #if GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10)
#define __fallthrough __attribute__((__fallthrough__))
#else
#define __fallthrough
#if defined(__cplusplus) && (__cplusplus >= 201703L)
#define __fallthrough [[fallthrough]]
- #elif GCC_VERSION_AT_LEAST(7, 0)
+ #elif GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10)
#define __fallthrough __attribute__((__fallthrough__))
#else
#define __fallthrough
#define unlikely(x) (x)
#endif
+#if !defined(BITS_PER_BYTE)
+#define BITS_PER_BYTE (8)
+#endif /* BITS_PER_BYTE */
+
/* These two macros are also provided by the kernel */
#ifndef BIT
#define BIT(b) (1UL << (b))
#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y);
-
#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/bug.h>
#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \
(void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes])
+/* Get a new pointer with an offset (in bytes) from a base address, version
+ * for volatile memory.
+ */
+#define IMG_OFFSET_ADDR_VOLATILE(addr, offset_in_bytes) \
+ (volatile void*)&(((volatile IMG_UINT8*)(volatile void*)(addr))[offset_in_bytes])
+
/* Get a new pointer with an offset (in dwords) from a base address, useful
* when traversing byte buffers and accessing data in buffers through struct
* pointers.
#define NOLDSTOPT_VOID
#endif
+#if defined(SERVICES_SC) && !defined(DEBUG)
+#define PVR_PRE_DPF(...)
+#else
+#define PVR_PRE_DPF (void) printf
+#endif
+
#endif /* IMG_DEFS_H */
/*****************************************************************************
End of file (img_defs.h)
#include <linux/types.h>
#include "kernel_types.h"
#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \
- defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv)
+ defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv) || defined(__APPLE__)
#include <stddef.h> /* NULL */
#include <stdint.h>
#include <inttypes.h> /* intX_t/uintX_t, format specifiers */
typedef int IMG_SECURE_TYPE;
-typedef enum tag_img_bool
-{
- IMG_FALSE = 0,
- IMG_TRUE = 1,
- IMG_FORCE_ALIGN = 0x7FFFFFFF
-} IMG_BOOL, *IMG_PBOOL;
+typedef bool IMG_BOOL;
+typedef bool* IMG_PBOOL;
+#define IMG_FALSE ((bool) 0)
+#define IMG_TRUE ((bool) 1)
#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
typedef IMG_CHAR const* IMG_PCCHAR;
typedef struct OSWR_LOCK_TAG *POSWR_LOCK;
#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
typedef struct OSWR_LOCK_TAG {
- IMG_UINT32 ui32Dummy;
+ IMG_UINT32 ui32Unused;
} *POSWR_LOCK;
#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
-#if defined(__linux__)
+#if defined(__linux__) || defined(__APPLE__)
typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
#elif defined(__QNXNTO__)
typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
#elif defined(_WIN32)
/*
- * Dummy definition. WDDM doesn't use Services, but some headers
+ * Placeholder definition. WDDM doesn't use Services, but some headers
* still have to be shared. This is one such case.
*/
typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T;
{
uint32_t ui32log2 = 0;
- while ((n >>= 1) != 0U)
+ for (n >>= 1; n != 0U; n >>= 1)
{
ui32log2++;
}
{
uint32_t ui32log2 = 0;
- while ((n >>= 1) != 0U)
+ for (n >>= 1; n != 0U; n >>= 1)
{
ui32log2++;
}
{
static const uint32_t b[] =
{0xAAAAAAAAU, 0xCCCCCCCCU, 0xF0F0F0F0U, 0xFF00FF00U, 0xFFFF0000U};
- uint32_t r = (n & b[0]) != 0U;
+ uint32_t r = (((n & b[0]) != 0U) ? 1U : 0U);
- r |= (uint32_t) ((n & b[4]) != 0U) << 4;
- r |= (uint32_t) ((n & b[3]) != 0U) << 3;
- r |= (uint32_t) ((n & b[2]) != 0U) << 2;
- r |= (uint32_t) ((n & b[1]) != 0U) << 1;
+ r |= (uint32_t) (((n & b[4]) != 0U) ? (1U << 4) : 0U);
+ r |= (uint32_t) (((n & b[3]) != 0U) ? (1U << 3) : 0U);
+ r |= (uint32_t) (((n & b[2]) != 0U) ? (1U << 2) : 0U);
+ r |= (uint32_t) (((n & b[1]) != 0U) ? (1U << 1) : 0U);
return r;
}
static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
{
static const uint64_t b[] =
- {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
- 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
- 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL};
- uint32_t r = (n & b[0]) != 0U;
-
- r |= (uint32_t) ((n & b[5]) != 0U) << 5;
- r |= (uint32_t) ((n & b[4]) != 0U) << 4;
- r |= (uint32_t) ((n & b[3]) != 0U) << 3;
- r |= (uint32_t) ((n & b[2]) != 0U) << 2;
- r |= (uint32_t) ((n & b[1]) != 0U) << 1;
+ {0xAAAAAAAAAAAAAAAAUL, 0xCCCCCCCCCCCCCCCCUL,
+ 0xF0F0F0F0F0F0F0F0UL, 0xFF00FF00FF00FF00UL,
+ 0xFFFF0000FFFF0000UL, 0xFFFFFFFF00000000UL};
+ uint32_t r = (((n & b[0]) != 0U) ? 1U : 0U);
+
+ r |= (uint32_t) (((n & b[5]) != 0U) ? (1U << 5) : 0U);
+ r |= (uint32_t) (((n & b[4]) != 0U) ? (1U << 4) : 0U);
+ r |= (uint32_t) (((n & b[3]) != 0U) ? (1U << 3) : 0U);
+ r |= (uint32_t) (((n & b[2]) != 0U) ? (1U << 2) : 0U);
+ r |= (uint32_t) (((n & b[1]) != 0U) ? (1U << 1) : 0U);
return r;
}
*/ /**************************************************************************/
#define OSDeviceMemSet(a,b,c) \
do { \
- if ((c) != 0) \
+ if ((c) != 0U) \
{ \
(void) memset((a), (b), (c)); \
(void) *(volatile IMG_UINT32*)((void*)(a)); \
*/ /**************************************************************************/
#define OSDeviceMemCopy(a,b,c) \
do { \
- if ((c) != 0) \
+ if ((c) != 0U) \
{ \
memcpy((a), (b), (c)); \
(void) *(volatile IMG_UINT32*)((void*)(a)); \
@Input c the number of bytes to be set to the given value
@Return Pointer to the destination memory.
*/ /**************************************************************************/
-#if !defined(SERVICES_SC)
#define OSCachedMemSetWMB(a,b,c) \
do { \
- if ((c) != 0) \
+ if ((c) != 0U) \
{ \
(void) memset((a), (b), (c)); \
OSWriteMemoryBarrier(a); \
} \
} while (false)
-#else
-#define OSCachedMemSetWMB(a,b,c) \
- do { \
- (void) memset((a), (b), (c)); \
- OSWriteMemoryBarrier(); \
- } while (false)
-#endif /* !defined(SERVICES_SC) */
/**************************************************************************/ /*!
@Function OSCachedMemCopy
@Description Copy values from one area of memory, to another, when both
@Input c the number of bytes to be copied
@Return Pointer to the destination memory.
*/ /**************************************************************************/
-#if !defined(SERVICES_SC)
#define OSCachedMemCopyWMB(a,b,c) \
do { \
- if ((c) != 0) \
+ if ((c) != 0U) \
{ \
(void) memcpy((a), (b), (c)); \
OSWriteMemoryBarrier(a); \
} \
} while (false)
-#else
-#define OSCachedMemCopyWMB(a,b,c) \
- do { \
- (void) memcpy((a), (b), (c)); \
- OSWriteMemoryBarrier(); \
- } while (false)
-#endif /* !defined(SERVICES_SC) */
#endif /* defined(__KERNEL__) */
/**************************************************************************/ /*!
#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8
#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT 11
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_MASK 0x00000800
+
#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12
#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000
#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24
#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT 25
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT 26
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_MASK 0x0C000000
+
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28
#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_50_75 (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_25_37_50 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_GROUP_CONTROL_SHIFT)
+
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_75 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_37 (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_50 (2U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_25 (3U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_TFBC_MODE_SHIFT)
+
#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT)
/*!
PDump MMU type
- (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
*/
typedef enum
{
#define PDUMP_STATE_CONNECTED (2U) /*!< Flag represents the PDump Client App being connected on not */
#define PDUMP_STATE_SUSPENDED (4U) /*!< Flag represents the PDump being suspended or not */
#define PDUMP_STATE_CAPTURE_IN_INTERVAL (8U) /*!< Flag represents the PDump being in a capture range interval */
+#define PDUMP_STATE_APP_TERMINATED (16U) /*!< Flag represents the PDump captured app has been terminated */
/*!
PDump Capture modes
*/
/*
- * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2
+ * Header type (IMGBv3) - 'IMGB' in hex + VERSION 3
* Header size - 64 bytes
*/
#define IMAGE_HEADER_TYPE (0x42474D49)
#define IMAGE_HEADER_SIZE (64)
-#define IMAGE_HEADER_VERSION (2)
+#define IMAGE_HEADER_VERSION (3)
/*
* Image type-specific fields
#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30)
#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_8x8_V13 fourcc_mod_code(PVR, 32)
+#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY37_16x4_V13 fourcc_mod_code(PVR, 33)
+
#endif /* IMG_DRM_FOURCC_H */
#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */
/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
-#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */
-#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */
-#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */
-#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */
-#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */
-#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */
-#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */
-#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */
-#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
-#define DBGPRIV_LAST 0x100UL /*!< Always set to highest mask value. Privately used by pvr_debug. */
+#define DBGPRIV_FATAL 0x001U /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR 0x002U /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING 0x004U /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE 0x008U /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE 0x010U /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE 0x020U /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC 0x040U /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED 0x080U /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG 0x100U /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_LAST 0x100U /*!< Always set to highest mask value. Privately used by pvr_debug. */
/* Enable DPF logging for locally from some make targets */
#if defined(PVRSRV_NEED_PVR_DPF_LOCAL)
/* These levels are always on with PVRSRV_NEED_PVR_DPF */
/*! @cond Doxygen_Suppress */
- #define PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
- #define PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
- #define PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+ #define PVR_DPF_0x001U(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+ #define PVR_DPF_0x002U(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+ #define PVR_DPF_0x080U(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
/*
* The AdHoc-Debug level is only supported when enabled in the local
* builds. An error is generated in the formal build if it is checked in.
*/
#if defined(PVR_DPF_ADHOC_DEBUG_ON)
- #define PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+ #define PVR_DPF_0x100U(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
#else
/* Use an undefined token here to stop compilation dead in the offending module */
- #define PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+ #define PVR_DPF_0x100U(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
#endif
/* Some are compiled out completely in release builds */
#if defined(DEBUG) || defined(DOXYGEN)
- #define PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
- #define PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
- #define PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
- #define PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
- #define PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+ #define PVR_DPF_0x004U(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+ #define PVR_DPF_0x008U(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+ #define PVR_DPF_0x010U(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+ #define PVR_DPF_0x020U(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+ #define PVR_DPF_0x040U(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
#else
- #define PVR_DPF_0x004UL(...)
- #define PVR_DPF_0x008UL(...)
- #define PVR_DPF_0x010UL(...)
- #define PVR_DPF_0x020UL(...)
- #define PVR_DPF_0x040UL(...)
+ #define PVR_DPF_0x004U(...)
+ #define PVR_DPF_0x008U(...)
+ #define PVR_DPF_0x010U(...)
+ #define PVR_DPF_0x020U(...)
+ #define PVR_DPF_0x040U(...)
#endif
/* Translate the different log levels to separate macros
#if !defined(DOXYGEN)
#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__))
#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x
-#endif /*!defined(DOXYGEN) */
+#endif
/* Note: Use only when a log message due to the error absolutely should not
* be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro.
#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
/*! @endcond */
-#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__)
+#if (defined(__KERNEL__) || defined(SUPPORT_SERVICES_SC_UNITTESTS_SERVER))|| defined(DOXYGEN) || defined(__QNXNTO__)
/*Use PVR_DPF() unless message is necessary in release build */
#define PVR_LOG(X) PVRSRVReleasePrintf X
#include "img_types.h"
-#define PVRSRV_MAX_DEVICES 16U /*!< Largest supported number of devices on the system */
+#define PVRSRV_MAX_DEVICES 16U /*!< Largest supported number of devices on the system */
+#define PVRSRV_HOST_DEVICE_ID 255U /*!< Device ID used for host (non-GPU) device. */
+
+static_assert(PVRSRV_MAX_DEVICES < PVRSRV_HOST_DEVICE_ID, "Invalid host device ID.");
#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
#define __pvrsrv_defined_struct_enum__
} PVRSRV_ERROR;
+/*!
+ * @Function PVRSRVIsRetryError
+ * @Description Checks if error code is one of the errors that require retry
+ * from the caller.
+ * @Input eError Error code.
+ * @Return IMG_TRUE if eError is one of the error codes that require the caller
+ * to retry.
+ */
+#define PVRSRVIsRetryError(eError) \
+ (((eError == PVRSRV_ERROR_RETRY) || (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)) ? \
+ IMG_TRUE : IMG_FALSE)
+
#endif /* !defined(PVRSRV_ERROR_H) */
PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_TOO_LARGE)
PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE)
PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)
+PVRE(PVRSRV_ERROR_RA_OUT_OF_RESOURCE)
+PVRE(PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS)
PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_RA_FREE_INVALID_CHUNK)
PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
PVRE(PVRSRV_ERROR_ION_NO_CLIENT)
PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC)
PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
-PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW)
PVRE(PVRSRV_ERROR_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_OUT_OF_APP_POOL_MEMORY)
+PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY)
*
* NOTE: Enum order important, table in physheap.c must change if order changed.
*/
-typedef IMG_UINT32 PVRSRV_PHYS_HEAP;
-/* Services client accessible heaps */
-#define PVRSRV_PHYS_HEAP_DEFAULT 0U /* default phys heap for device memory allocations */
-#define PVRSRV_PHYS_HEAP_GPU_LOCAL 1U /* used for buffers with more GPU access than CPU */
-#define PVRSRV_PHYS_HEAP_CPU_LOCAL 2U /* used for buffers with more CPU access than GPU */
-#define PVRSRV_PHYS_HEAP_GPU_PRIVATE 3U /* used for buffers that only required GPU read/write access, not visible to the CPU. */
-
-#define HEAPSTR(x) #x
-static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeapID)
+#define PHYS_HEAP_LIST \
+ X(DEFAULT) /* Client: default phys heap for device memory allocations */ \
+ X(GPU_LOCAL) /* Client: used for buffers with more GPU access than CPU */ \
+ X(CPU_LOCAL) /* Client: used for buffers with more CPU access than GPU */ \
+ X(GPU_PRIVATE) /* Client: used for buffers that only require GPU read/write access and are not visible to the CPU. */ \
+ X(FW_MAIN) /* Internal: runtime data, e.g. CCBs, sync objects */ \
+ X(EXTERNAL) /* Internal: used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ \
+ X(GPU_COHERENT) /* Internal: used for a cache coherent region */ \
+ X(GPU_SECURE) /* Internal: used by security validation */ \
+ X(FW_CONFIG) /* Internal: subheap of FW_MAIN, configuration data for FW init */ \
+ X(FW_CODE) /* Internal: used by security validation or dedicated fw */ \
+ X(FW_PRIV_DATA) /* Internal: internal FW data (like the stack, FW control data structures, etc.) */ \
+ X(FW_PREMAP_PT) /* Internal: page tables for premapped firmware memory */ \
+ X(FW_PREMAP0) /* Internal: Host OS premap fw heap */ \
+ X(FW_PREMAP1) /* Internal: Guest OS 1 premap fw heap */ \
+ X(FW_PREMAP2) /* Internal: Guest OS 2 premap fw heap */ \
+ X(FW_PREMAP3) /* Internal: Guest OS 3 premap fw heap */ \
+ X(FW_PREMAP4) /* Internal: Guest OS 4 premap fw heap */ \
+ X(FW_PREMAP5) /* Internal: Guest OS 5 premap fw heap */ \
+ X(FW_PREMAP6) /* Internal: Guest OS 6 premap fw heap */ \
+ X(FW_PREMAP7) /* Internal: Guest OS 7 premap fw heap */ \
+ X(WRAP) /* External: Wrap memory */ \
+ X(DISPLAY) /* External: Display memory */ \
+ X(LAST)
+
+typedef enum _PVRSRV_PHYS_HEAP_
{
- switch (ePhysHeapID)
- {
- case PVRSRV_PHYS_HEAP_DEFAULT:
- return HEAPSTR(PVRSRV_PHYS_HEAP_DEFAULT);
- case PVRSRV_PHYS_HEAP_GPU_LOCAL:
- return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_LOCAL);
- case PVRSRV_PHYS_HEAP_CPU_LOCAL:
- return HEAPSTR(PVRSRV_PHYS_HEAP_CPU_LOCAL);
- case PVRSRV_PHYS_HEAP_GPU_PRIVATE:
- return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_PRIVATE);
- default:
- return "Unknown Heap";
- }
-}
+#define X(_name) PVRSRV_PHYS_HEAP_ ## _name,
+ PHYS_HEAP_LIST
+#undef X
-/* Services internal heaps */
-#define PVRSRV_PHYS_HEAP_FW_MAIN 4U /* runtime data, e.g. CCBs, sync objects */
-#define PVRSRV_PHYS_HEAP_EXTERNAL 5U /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */
-#define PVRSRV_PHYS_HEAP_GPU_COHERENT 6U /* used for a cache coherent region */
-#define PVRSRV_PHYS_HEAP_GPU_SECURE 7U /* used by security validation */
-#define PVRSRV_PHYS_HEAP_FW_CONFIG 8U /* subheap of FW_MAIN, configuration data for FW init */
-#define PVRSRV_PHYS_HEAP_FW_CODE 9U /* used by security validation or dedicated fw */
-#define PVRSRV_PHYS_HEAP_FW_PRIV_DATA 10U /* internal FW data (like the stack, FW control data structures, etc.) */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP0 11U /* Host OS premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP1 12U /* Guest OS 1 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP2 13U /* Guest OS 2 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP3 14U /* Guest OS 3 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP4 15U /* Guest OS 4 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP5 16U /* Guest OS 5 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP6 17U /* Guest OS 6 premap fw heap */
-#define PVRSRV_PHYS_HEAP_FW_PREMAP7 18U /* Guest OS 7 premap fw heap */
-#define PVRSRV_PHYS_HEAP_LAST 19U
+ PVRSRV_PHYS_HEAP_INVALID = 0x7FFFFFFF
+} PVRSRV_PHYS_HEAP;
+/* Defines the number of user mode physheaps. These physheaps are: DEFAULT, GPU_LOCAL,
+ * CPU_LOCAL, GPU_PRIVATE, GPU_SECURE. */
+#define MAX_USER_MODE_ALLOC_PHYS_HEAPS 5
static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield.");
PHYS_HEAP_TYPE_DMA, /*!< Heap represents a physical memory pool managed by
Services, alias of LMA and is only used on
VZ non-native system configurations for
- a heap used for PHYS_HEAP_USAGE_FW_MAIN tagged
- buffers */
+ a heap used for allocations tagged with
+ PVRSRV_PHYS_HEAP_FW_MAIN or
+ PVRSRV_PHYS_HEAP_FW_CONFIG */
#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
PHYS_HEAP_TYPE_WRAP, /*!< Heap used to group UM buffers given
to Services. Integrity OS port only. */
#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK (0x7U << 0)
#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7)
+/* Force PHYS_HEAP_MEM_STATS size to be a multiple of 8 bytes
+ * (as type is a parameter in bridge calls)
+ */
typedef struct PHYS_HEAP_MEM_STATS_TAG
{
IMG_UINT64 ui64TotalSize;
IMG_UINT64 ui64FreeSize;
IMG_UINT32 ui32PhysHeapFlags;
+ IMG_UINT32 ui32UnusedPadding;
}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR;
-typedef struct PHYS_HEAP_MEM_STATS_PKD_TAG
-{
- IMG_UINT64 ui64TotalSize;
- IMG_UINT64 ui64FreeSize;
- IMG_UINT32 ui32PhysHeapFlags;
- IMG_UINT32 ui32Dummy;
-}PHYS_HEAP_MEM_STATS_PKD, *PHYS_HEAP_MEM_STATS_PKD_PTR;
+#if defined(PHYSHEAP_STRINGS)
+
+static const char *const _pszPhysHeapStrings[] = {
+#define X(_name) #_name,
+ PHYS_HEAP_LIST
+#undef X
+};
+
+/*************************************************************************/ /*!
+@Function PVRSRVGetClientPhysHeapTypeName
+@Description Returns the phys heap type as a string.
+
+@Input ePhysHeapType The physheap type.
+@Return const IMG_CHAR pointer.
+*/ /**************************************************************************/
static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType)
{
+#define HEAPSTR(x) #x
switch (ePhysHeapType)
{
case PHYS_HEAP_TYPE_UMA:
default:
return "Unknown Heap Type";
}
-}
#undef HEAPSTR
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVGetPhysHeapName
+@Description Returns the name of a PhysHeap.
+
+@Input ePhysHeap The enum value of the physheap.
+
+@Return const IMG_CHAR pointer.
+*/ /**************************************************************************/
+static inline const IMG_CHAR *PVRSRVGetPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap)
+{
+ if (ePhysHeap < 0 || ePhysHeap >= PVRSRV_PHYS_HEAP_LAST)
+ {
+ return "Undefined";
+ }
+
+ return _pszPhysHeapStrings[ePhysHeap];
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVGetClientPhysHeapName
+@Description Returns the name of a client PhysHeap.
+
+@Input ePhysHeap The enum value of the physheap.
+
+@Return const IMG_CHAR pointer.
+*/ /**************************************************************************/
+static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeap)
+{
+ if (ePhysHeap > PVRSRV_PHYS_HEAP_GPU_PRIVATE)
+ {
+ return "Unknown Heap";
+ }
+
+ return PVRSRVGetPhysHeapName(ePhysHeap);
+}
+#endif /* PHYSHEAP_STRINGS */
#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */
* | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
*
* --- MISC FLAGS 15..23 (9-bits) ---
- * | 15 | 17 | 18 | 19 | 20 |
- * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page |
+ * | 15 | 16 | 17 | 18 | 19 | 20 |
+ * | Defer | Alloc-Now | SVM | Dummy-Pg | CPU-Cache-Clean | Zero-Pg |
*
* --- DEV CONTROL FLAGS 26..27 (2-bits) ---
- * | 26-27 |
- * | Device-Flags |
+ * | 21-25 | 26-27 |
+ * | ..... | Device-Flags |
*
* --- MISC FLAGS 28..31 (4-bits) ---
* | 28 | 29 | 30 | 31 |
* This distinction becomes important when (a) we export allocations;
* and (b) when we separate the creation of the PMR from the mapping.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1ULL<<0)
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (IMG_UINT64_C(1)<<0)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set.
* N.B. This flag has no relevance to the CPU's MMU mapping, if any,
* and would therefore not enforce read-only mapping on CPU.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1ULL<<1)
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (IMG_UINT64_C(1)<<1)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set.
/*!
The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1ULL<<2)
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (IMG_UINT64_C(1)<<2)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set.
/*!
The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1ULL<<3)
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (IMG_UINT64_C(1)<<3)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set.
/*!
The flag indicates that an allocation is mapped as readable to the CPU.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1ULL<<4)
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (IMG_UINT64_C(1)<<4)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set.
/*!
The flag indicates that an allocation is mapped as writable to the CPU.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1ULL<<5)
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (IMG_UINT64_C(1)<<5)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set.
/*!
The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1ULL<<6)
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (IMG_UINT64_C(1)<<6)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set.
/*!
The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1ULL<<7)
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (IMG_UINT64_C(1)<<7)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set.
allocated with this flag are written straight to memory and thus are
coherent for any device in the system.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (1ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (IMG_UINT64_C(1)<<8)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set.
GPU domain. Use write combiner (if supported) to combine sequential writes
together to reduce memory access by doing burst writes.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (0ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (IMG_UINT64_C(0)<<8)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set.
GPU cache is snooping the CPU cache. If coherency is not supported the
caller is responsible to ensure the caches are up to date.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (IMG_UINT64_C(2)<<8)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set.
Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
expansion.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (IMG_UINT64_C(3)<<8)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set.
that the underlying allocation should be cached on the GPU after all
the snooping and coherent checks have been done
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (IMG_UINT64_C(7)<<8)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set.
/*!
GPU domain. GPU cache mode mask.
*/
-#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7ULL<<8)
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (IMG_UINT64_C(7)<<8)
/*!
@Description A helper macro to obtain just the GPU cache bit field from the flags.
CPU domain. Use write combiner (if supported) to combine sequential writes
together to reduce memory access by doing burst writes.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (0ULL<<11)
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (IMG_UINT64_C(0)<<11)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set.
CPU cache is snooping the GPU cache. If coherency is not supported the
caller is responsible to ensure the caches are up to date.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2ULL<<11)
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (IMG_UINT64_C(2)<<11)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set.
Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
expansion.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3ULL<<11)
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (IMG_UINT64_C(3)<<11)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set.
that the underlying allocation should be cached on the CPU
after all the snooping and coherent checks have been done
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7ULL<<11)
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (IMG_UINT64_C(7)<<11)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set.
/*!
CPU domain. CPU cache mode mask
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7ULL<<11)
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (IMG_UINT64_C(7)<<11)
/*!
@Description A helper macro to obtain just the CPU cache bit field from the flags.
in-kernel CPU mappings. Only privileged processes may use this flag as
it may cause wastage of precious kernel virtual memory on some platforms.
*/
-#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1ULL<<14)
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (IMG_UINT64_C(1)<<14)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set.
* * ALLOC MEMORY FLAGS *
* * *
* **********************************************************
- *
- * (Bits 15)
- *
*/
-#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1ULL<<15)
-#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U)
+/*! ----- Bit 15
+
+ Indicates when the allocation of physical memory pages backing the PMR
+ is carried out. When set, pages are not allocated at PMR creation but are
+ instead deferred until they are first needed, i.e. "on demand".
+ When unset, the pages may be allocated at the same time the PMR is created
+ or deferred (at the KM/Server's discretion).
+ See also PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW (below). Note that at most one
+ of these two flags may be set.
+ */
+#define PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC (IMG_UINT64_C(1)<<15)
/*!
+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC flag is set.
+ @Input uiFlags Allocation flags.
+ @Return True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) != 0U)
+
+/*! ----- Bit 16
+
+ Indicates when the allocation of physical memory pages backing the PMR
+ is carried out. When set, pages are allocated at PMR creation.
+ When unset, the pages may be allocated at the same time the PMR is created
+ or deferred (at the KM/Server's discretion).
+ See also PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC (above). Note that at most one
+ of these two flags may be set.
+ */
+#define PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW (IMG_UINT64_C(1)<<16)
+/*!
+ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW flag is set.
+ @Input uiFlags Allocation flags.
+ @Return True if the flag is set, false otherwise
+ */
+#define PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW) != 0U)
+
+/*! ----- Bit 17
+
Indicates that the allocation will be accessed by the CPU and GPU using
the same virtual address, i.e. for all SVM allocs,
IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
*/
-#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1ULL<<17)
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (IMG_UINT64_C(1)<<17)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set.
*/
#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U)
-/*!
+/*! ----- Bit 18
+
Indicates the particular memory that's being allocated is sparse and the
sparse regions should not be backed by dummy page
*/
-#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1ULL << 18)
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (IMG_UINT64_C(1) << 18)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set.
*/
#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0U)
-/*!
+/*! ----- Bit 19
+
Used to force Services to carry out at least one CPU cache invalidate on a
CPU cached buffer during allocation of the memory. Applicable to incoherent
systems, it must be used for buffers which are CPU cached and which will not
reasons, avoid usage if the whole buffer that is allocated is written to by
the CPU anyway before the next GPU kick, or if the system is coherent.
*/
-#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1ULL<<19)
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (IMG_UINT64_C(1)<<19)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set.
*/
#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U)
-/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING
+/*! ----- Bit 20
Indicates the particular memory that's being allocated is sparse and the
sparse regions should be backed by zero page. This is different with
The zero backed page is always with read only attribute irrespective of its
original attributes.
*/
-#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (1ULL << 20)
+#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (IMG_UINT64_C(1) << 20)
#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \
PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING)
-/*!
- @Description Macro extracting the OS id from a variable containing memalloc flags
- @Input uiFlags Allocation flags
- @Return returns the value of the FW_ALLOC_OSID bitfield
+/*! ----- Bit 21
+ *
+ Not used.
*/
-#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \
- >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT)
-/*!
- @Description Macro converting an OS id value into a memalloc bitfield
- @Input uiFlags OS id
- @Return returns a shifted bitfield with the OS id value
+/*! ----- Bit 22
+ *
+ Not used.
+ */
+
+/*! ----- Bit 23
+ *
+ Not used.
+ */
+
+/*! ----- Bit 24
+ *
+ Not used.
+ */
+
+/*! ----- Bit 25
+ *
+ Not used.
*/
-#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \
- & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \
/*
*
/*!
Ensures that the memory allocated is initialised with zeroes.
*/
-#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1ULL<<31)
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (IMG_UINT64_C(1)<<31)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set.
a simulation to cry loudly if the initialised data propagates to a
result.
*/
-#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1ULL<<30)
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (IMG_UINT64_C(1)<<30)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set.
*/
#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U)
-#if defined(DEBUG) || defined(SERVICES_SC)
+#if defined(DEBUG)
/*!
Causes memory to be trashed when freed, used when debugging only, not to be used
as a security measure.
*/
-#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1ULL<<29)
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (IMG_UINT64_C(1)<<29)
/*!
@Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set.
/*!
Avoid address alignment to a CPU or GPU cache line size.
*/
-#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (1ULL<<28)
+#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (IMG_UINT64_C(1)<<28)
/*!
@Description Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set.
/*!
* Mask for retrieving device specific MMU flags.
*/
-#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (0x3ULL << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (IMG_UINT64_C(3) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
/*!
@Description Helper macro for setting device specific MMU flags.
PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER
*/
-#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (1ULL<<35)
+#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (IMG_UINT64_C(1)<<35)
#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U)
/*
* i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0) used, the system layer defined default physical heap is used.
*/
#define PVRSRV_PHYS_HEAP_HINT_SHIFT (59)
-#define PVRSRV_PHYS_HEAP_HINT_MASK (0x1FULL << PVRSRV_PHYS_HEAP_HINT_SHIFT)
+#define PVRSRV_PHYS_HEAP_HINT_MASK (IMG_UINT64_C(0x1F) << PVRSRV_PHYS_HEAP_HINT_SHIFT)
/*!
@Input uiFlags Allocation flags
@Return returns the value of the PHYS_HEAP_HINT bitfield
*/
-#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) (((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \
- >> PVRSRV_PHYS_HEAP_HINT_SHIFT)
+#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) ((PVRSRV_PHYS_HEAP)(((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \
+ >> PVRSRV_PHYS_HEAP_HINT_SHIFT))
/*!
@Description Macro converting a Phys Heap value into a memalloc bitfield
* Trusted device mask -- Flags in the mask are allowed for trusted device
* because the driver cannot access the memory
*/
-#if defined(DEBUG) || defined(SERVICES_SC)
+#if defined(DEBUG)
#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
that will be passed down and stored with the PMR, this also includes the
MMU flags which the PMR has to pass down to mm_common.c at PMRMap time.
*/
-#if defined(DEBUG) || defined(SERVICES_SC)
+#if defined(DEBUG)
#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
- PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \
PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
- PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \
PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
memory might be reused.
*/
-#if defined(DEBUG) || defined(SERVICES_SC)
+#if defined(DEBUG)
#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
& \
~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
- PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC | \
PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
/*!
Flags that affect _physical allocations_ in the DevMemX API
*/
-#if defined(DEBUG) || defined(SERVICES_SC)
+#if defined(DEBUG)
#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \
PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \
PVRSRV_PHYS_HEAP_HINT_MASK)
#else
#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW | \
PVRSRV_PHYS_HEAP_HINT_MASK)
#endif
/*! Packet lengths are always rounded up to a multiple of 8 bytes */
#define PVRSRVTL_PACKET_ALIGNMENT 8U
-#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1U) & ~(PVRSRVTL_PACKET_ALIGNMENT-1U))
+#define PVRSRVTL_ALIGN(x) PVR_ALIGN(x, PVRSRVTL_PACKET_ALIGNMENT)
/*! A packet is made up of a header structure followed by the data bytes.
/*! Check if packets were dropped before this packet.
* p is of type PVRSRVTL_PPACKETHDR.
*/
-#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
+#define CHECK_PACKETS_DROPPED(p) ((((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET) != 0U)
/*! Flags for use with PVRSRVTLOpenStream
* 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
#define PVRVERSION_H
#define PVRVERSION_MAJ 1U
-#define PVRVERSION_MIN 17U
+#define PVRVERSION_MIN 19U
#define PVRVERSION_FAMILY "rogueddk"
-#define PVRVERSION_BRANCHNAME "1.17"
-#define PVRVERSION_BUILD 6210866
+#define PVRVERSION_BRANCHNAME "1.19"
+#define PVRVERSION_BUILD 6345021
#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS"
-#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.17@6210866"
-#define PVRVERSION_STRING_SHORT "1.17@6210866"
+#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.19@6345021"
+#define PVRVERSION_STRING_SHORT "1.19@6345021"
#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
-#define PVRVERSION_BUILD_HI 621
-#define PVRVERSION_BUILD_LO 866
-#define PVRVERSION_STRING_NUMERIC "1.17.621.866"
+#define PVRVERSION_BUILD_HI 634
+#define PVRVERSION_BUILD_LO 5021
+#define PVRVERSION_STRING_NUMERIC "1.19.634.5021"
#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform.
* As such a driver can support either the vz-validation code or real virtualisation.
- * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+ * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_DRIVERS_SUPPORTED is the internal symbol used in the DDK */
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1))
#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive."
#endif
* @{
*/
#define RGX_CTX_PRIORITY_REALTIME (INT32_MAX)
-#define RGX_CTX_PRIORITY_HIGH (2U) /*!< HIGH priority */
-#define RGX_CTX_PRIORITY_MEDIUM (1U) /*!< MEDIUM priority */
+#define RGX_CTX_PRIORITY_HIGH (2) /*!< HIGH priority */
+#define RGX_CTX_PRIORITY_MEDIUM (1) /*!< MEDIUM priority */
#define RGX_CTX_PRIORITY_LOW (0) /*!< LOW priority */
/*!
* @} End of AddToGroup WorkloadContexts
#define RGX_CONTEXT_FLAGS_WRITEABLE_MASK (RGX_CONTEXT_FLAG_DISABLESLR)
/* List of attributes that may be set for a context */
-typedef enum _RGX_CONTEXT_PROPERTY_
-{
- RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */
-} RGX_CONTEXT_PROPERTY;
+typedef IMG_UINT32 RGX_CONTEXT_PROPERTY;
+#define RGX_CONTEXT_PROPERTY_FLAGS 0U /*!< Context flags */
#if defined(__cplusplus)
}
/*id, gid, id name, string, # arguments */ \
X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \
\
-X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \
+X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x. Partial render:%u, CSW resume:%u, prio:%d", 6) \
X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \
-X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \
+X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u, prio: %d", 4) \
X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \
-X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %u, prio: %d", 3) \
X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \
-X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \
+X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x. First kick:%u, Last kick:%u, CSW resume:%u, prio:%d", 7) \
X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \
X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \
X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \
X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \
-X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %u, prio:%d", 3) \
X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \
-X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \
-X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \
+X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %u, DM = %u, FWCtx = 0x%08.8x", 3) \
+X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %u", 2) \
X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \
X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \
X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x", 4) \
-X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %u", 2) \
X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
-X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \
-X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
-X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %u of:", 1) \
+X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \
+X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \
X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \
X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \
-X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \
+X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %u failed: host = 0x%x, fw = 0x%x", 3) \
X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \
X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \
X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \
X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \
X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \
X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \
-X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \
+X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %u failed: addresses = %u, sizes = %u", 3) \
X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \
-X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
-X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \
+X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %u, Units: 0x%08.8x", 2) \
+X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %u to %u", 2) \
X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \
-X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \
+X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %u (bPowRascalDust=%u)", 2) \
X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \
X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \
-X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \
-X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \
-X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \
-X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \
-X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \
-X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \
+X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%u FWCtx: 0x%08.8x", 2) \
+X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%u checker: CatBase TE=0x%08x (%u Pages), VCE=0x%08x (%u Pages), ALIST=0x%08x, IsTA=%u", 7) \
+X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%u checker: MList[%u] = 0x%08x", 3) \
+X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%u OK", 1) \
+X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%u is empty", 1) \
+X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%u checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%u", 8) \
X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \
X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \
X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \
-X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d", 3) \
X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \
-X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \
+X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %u, prio: %d", 3) \
X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \
X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \
X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \
-X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \
-X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \
+X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%u start", 1) \
+X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%u complete", 1) \
X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \
-X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
+X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \
X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \
X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \
-X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \
+X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %u cycles, write: %u cycles, iterations: %u", 3) \
X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \
-X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \
+X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %u. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \
X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \
X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \
X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \
X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \
X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \
-X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \
+X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %u", 2) \
X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \
-X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \
-X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %d", 2) \
-X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \
-X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
-X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \
-X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \
+X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %u", 2) \
+X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %u", 2) \
+X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %u, prio: %d, Frame Context: %u", 4) \
+X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \
+X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %u to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \
+X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %u (deferred DMs = 0x%08x)", 4) \
X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
-X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
-X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \
-X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \
-X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \
+X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for Driver ID %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \
+X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %u", 1) \
+X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %u", 1) \
+X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %u Hz", 1) \
X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \
X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \
X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \
X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \
X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \
-X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %u, prio:%d", 3) \
X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \
-X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%u]_PIPE[%u]: 0x%08x 0x%08x)", 4) \
X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \
X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \
X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \
X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \
X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \
X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \
-X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %u's priority from %u to %u", 3) \
X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \
-X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
-X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
-X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
-X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
-X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
-X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \
-X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
-X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \
-X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \
-X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \
-X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \
-X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %u, Frame Context:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %u.", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %u.", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %u.", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%u failed", 1) \
X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
-X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
-X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \
-X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \
-X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \
-X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %u failed to Context Switch on time. Triggered HCS (see HWR logs).", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %u ms", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %u: USCTiles=%u", 2) \
X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \
-X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \
-X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \
-X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %u", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %u has come online", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %u has gone offline", 1) \
X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \
X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \
X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \
X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \
X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \
X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \
-X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \
-X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %u with config flags 0x%08x", 2) \
+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %u/%u", 2) \
X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \
-X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \
+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %u with config flags 0x%08x and extended config flags 0x%08x", 3) \
X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \
-X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \
-X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \
+X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x", 4) \
+X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %u [0x%08.8x] = 0x%08.8x, reason %u", 5) \
X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \
-X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \
+X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "Driver ID %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \
X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \
X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \
X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \
-X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \
-X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \
+X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for Driver ID %u @ KCCB 0x%08x", 3) \
+X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %u in file 0x%08x%08x, additional data=0x%08x", 4) \
X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \
X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \
X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \
X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \
-X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \
-X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \
-X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \
-X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \
+X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for Driver ID %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \
+X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %u CCB init status: %u (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \
+X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "Driver ID %u fw state transition request: from %u to %u (0-offline 1-ready 2-active 3-offloading). Status %u (1-ok 0-fail)", 4) \
+X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "Driver ID %u has %u stale commands in its KCCB", 2) \
X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \
X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \
X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \
X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \
-X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \
-X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \
-X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \
+X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for Driver ID %u with WOff %u", 2) \
+X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for Driver ID %u, FWCtx 0x%08x", 2) \
+X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from Driver ID %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \
X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \
-X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \
-X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \
-X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \
+X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %u", 1) \
+X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %u, FW state: 0x%08x, HWR flags: 0x%08x", 3) \
+X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %u triggered a reset", 1) \
X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \
-X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \
+X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %u which is not yet supported on Series8.", 1) \
X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \
-X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \
+X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%u", 5) \
X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \
X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \
X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \
X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \
X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \
X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \
-X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \
-X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \
+X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %u", 1) \
+X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "Driver ID %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \
X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \
-X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \
+X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%u, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \
X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \
X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \
-X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \
+X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%u, CoreMask=0x%02x, Raised=0x%02x", 3) \
X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \
X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \
X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \
X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \
X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \
-X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \
+X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for Driver ID %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \
X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \
X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \
X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \
X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set to 0x%x clock cycles", 1) \
X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event trigged by the safety watchdog.", 0) \
-X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \
+X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%u USC tasks range limit 0 - %u, stride %u", 3) \
X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \
X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \
-X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \
-X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\
+X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %u", 2) \
+X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 7)\
X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \
-X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \
-X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \
-X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \
+X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %u (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \
+X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%u increasing to %u)", 2) \
+X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%u increasing to %u)", 2) \
X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \
-X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \
-X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \
-X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \
-X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
-X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \
-X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
-X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \
+X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED2, "Changing Driver ID %u's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \
+X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %u", 2) \
+X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %u ahead of time on other GEOM", 2) \
+X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8) \
+X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, First kick:%u, Last kick:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 12) \
+X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %u, RTD 0x%08x, Partial render:%u, CSW resume:%u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 11) \
+X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, ext:0x%08x, int:0x%08x)", 7) \
X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \
X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \
X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \
X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \
-X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
-X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\
+X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %u, Base 0x%08x%08x. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 10) \
+X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %u. (PID:%u, prio:%d, frame:%u, ext:0x%08x, int:0x%08x)", 8)\
X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \
+X(239, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RT_UNITS_INIT, "GPU RT Units init (# mask: 0x%08x%08x)", 2) \
+X(240, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_PENDING_PASS, "UFO Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \
+X(241, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK_PENDING_PASS, "UFO PR-Check: [0x%08.8x] is pending update to 0x%08.8x and therefore passes", 2) \
+X(242, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DELAY_DM_TO_OVERLAP_PIPES, "Holding kick of DM %u pipe %u to encourage pipeline overlap", 2) \
+X(243, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RELEASE_DM_PIPE, "Releasing kick for DM %u pipe %u", 2) \
+X(244, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing Driver ID %u's priority from %u to %u", 3) \
+X(245, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ISOLATION_GROUP_CHANGE, "Changing Driver ID %u's priority group from %u to %u", 3) \
+X(246, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VK_TIMESTAMP, "VK Timestamp: addr=0x%08x%08x, avail=0x%08x%08x stamp=0x%08x%08x", 6) \
\
-X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \
+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %u", 2) \
X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \
-X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \
+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %u, SBIrq = 0x%x", 3) \
X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \
X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \
-X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \
-X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \
+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%u", 1) \
+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %u", 2) \
X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \
X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \
-X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \
-X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \
-X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \
+X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug Driver ID = %u, DM = %u, item = 0x%x", 3) \
+X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %u, OSid = %u", 3) \
+X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %u, OSid = %u", 3) \
X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \
X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \
-X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \
+X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %u OS ID = %u PID = %u context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %u PID = %u.", 7) \
X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \
X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \
-X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \
-X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \
+X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task Driver ID = %u", 1) \
+X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, Driver ID=%u", 3) \
X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \
-X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \
+X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with DriverID=%u from MTS, OSid for test=%u", 2) \
\
X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \
-X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \
-X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \
-X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \
+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %u, WriteOffset = %u", 3) \
+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%u, received cleanup request", 2) \
+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %u", 3) \
X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \
X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \
X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \
X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \
-X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \
-X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \
-X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \
-X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \
-X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \
+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %u, executed = %u", 3) \
+X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %u, executed = %u", 4) \
+X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%u, received cleanup request", 2) \
+X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %u", 3) \
+X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %u, executed = %u", 4) \
X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \
X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \
X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \
-X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \
-X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \
+X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %u, executed = %u", 3) \
+X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %u", 3) \
\
X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \
X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \
X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \
X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \
-X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \
-X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \
+X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%u state: 0x%08.8x", 2) \
+X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%u state: 0x%08.8x", 2) \
X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \
X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \
X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \
X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \
X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \
-X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \
+X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %u", 1) \
X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \
X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \
-X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \
+X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%u->%u)", 2) \
X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \
-X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \
+X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %u)", 1) \
X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \
X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \
X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \
X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \
X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \
-X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \
+X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (Driver ID, Driver ID Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \
X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \
X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \
-X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \
-X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \
-X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %u (1=IPP_TILE, 2=ISP_TILE)", 1) \
+X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%u state: 0x%08.8x%08x", 3) \
+X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%u state: 0x%08.8x%08x", 3) \
X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \
X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! IPP_status: 0x%08x", 1) \
-X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \
+X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%u state: 0x%08.8x%08x", 3) \
X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \
-X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \
-X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \
-X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \
+X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%u active core mask 0x%04.4x", 2) \
+X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%u active core mask 0x%04.4x", 2) \
+X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %u, pipe%u state: 0x%08.8x%08x%08x", 5) \
X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \
X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \
X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \
X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \
\
-X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \
+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%u secure=%u", 3) \
X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \
-X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \
-X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \
-X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \
-X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
-X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \
-X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \
-X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \
-X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \
-X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \
+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %u", 1) \
+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %u refcount now %u", 2) \
+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %u refcount now %u", 2) \
+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%u BIFreq=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%u, for BIFreq=%u", 2) \
+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %u base 0x%08x%08x len 0x%08x%08x enable %u stride %u --> 0x%08x%08x", 9) \
+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %u to OSID0, Cat Base %u, Register's contents are now 0x%08x 0x%08x", 4) \
+X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %u to OSID1, Context %u, Register's contents are now 0x%04x", 3) \
+X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx_DEPRECATED, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \
X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \
X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \
-X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \
-X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
+X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%u secure=%u", 3) \
+X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%u DM=%u, expect=0x%08x%08x, actual=0x%08x%08x", 6) \
X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \
-X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \
+X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%u, for DM=%u", 2) \
X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \
-X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \
-X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \
+X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%u", 6) \
+X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %u as register range [%u - %u]", 3) \
+X( 21, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32CoreID = %u, ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 8) \
+X( 22, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_SECURE, "Setup secure register=%u DM=%u, PC address=0x%08x%08x, OSid=%u, NewContext=%u", 6) \
+X( 23, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM_DEPRECATED, "Activate MemCtx=0x%08x DM=%u secure=%u CtxFlags=0x%08x", 4) \
+X( 24, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE_DEPRECATED, "Deactivate MemCtx=0x%08x CtxFlags=0x%08x", 2) \
\
X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \
X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \
X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled", 0) \
X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \
-X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \
-X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \
-X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \
+X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%u (0=OK, 1=Disabled)", 1) \
+X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%u byte(s))", 2) \
+X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%u byte(s))", 2) \
X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \
-X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \
+X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%u (0=OK, 1=Disabled)", 1) \
X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \
-X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \
-X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \
-X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \
-X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \
-X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \
-X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \
-X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \
-X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \
-X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \
-X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \
-X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \
-X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \
-X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \
-X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \
-X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \
+X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %u available returned %u", 2) \
+X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %u", 1) \
+X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %u (after %u ticks)", 2) \
+X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %u returned %u (0=No skip, 1=Skip frame)", 2) \
+X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %u in frame", 1) \
+X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %u is a new frame", 1) \
+X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %u ticks)", 1) \
+X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %u", 1) \
+X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %u)", 1) \
+X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %u)", 1) \
+X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %u)", 1) \
+X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_DEPRECATED, "TRP state: %u", 1) \
+X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %u", 1) \
+X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %u", 1) \
+X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE_DEPRECATED, "SW TRP failure: %u", 1) \
X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \
X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \
X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \
X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \
X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \
+X( 32, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_HWRTDATA, "TRP HWRTData: 0x%08x, state: %u", 2) \
+X( 33, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE_CNTX, "TRP Context: 0x%08x, state: %u", 2) \
+X( 34, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE_CNTX, "TRP Context: 0x%08x, failure: %u", 2) \
\
-X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \
-X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \
+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%u SP = %u, MLIST%u SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \
+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%u, local:%u, mmu:%u", 8) \
X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \
X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \
X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \
X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \
-X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \
-X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\
+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %u, operation(0-unpause, 1-pause): %u", 2) \
+X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %u", 2)\
X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \
X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \
-X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d", 7) \
+X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %u, finished: %u on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%u, local:%u", 7) \
X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \
X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \
X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \
X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \
-X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \
+X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %u, finished: %u (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \
X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \
X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \
-X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \
-X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \
-X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \
-X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \
+X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %u)", 1) \
+X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %u)", 1) \
+X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %u)", 1) \
+X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %u)", 1) \
\
X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \
X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \
X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. Aborting the current frame.", 0) \
X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \
-X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \
+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %u, grow size %u", 3) \
X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \
X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \
X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \
-X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \
+X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %u) at 0x%08x from current size %u to new size %u, RPM restart: %u (1=Yes)", 5) \
X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \
X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \
X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \
\
X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \
X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \
-X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \
+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u, mmu: %u", 4) \
X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \
X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \
-X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \
-X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \
-X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \
-X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
-X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \
-X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \
-X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%u: Load=No, Store=No", 2) \
+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%u: Load=Yes, Store=No", 2) \
+X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%u: Load=Yes, Store=Yes", 3) \
+X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%u: Load=Yes, Store=Yes", 3) \
+X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %u (0:MidTA,1:3D) on context %u, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \
+X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: TotalPMPages = %u, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \
X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \
X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \
-X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
-X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %u (0:local,1:global,2:mmu) for DM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \
X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \
-X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \
+X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %u (0:local,1:global,2:mmu) on HW context %u", 3) \
X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \
X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \
-X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \
-X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \
+X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%u", 3) \
+X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %u)", 3) \
X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \
X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \
X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \
X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \
-X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \
-X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
-X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
-X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \
+X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %u, global: %u", 3) \
+X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \
+X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 5) \
X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \
-X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \
-X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \
+X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %u, (MemCtx 0x%08x)", 4) \
+X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %u (0:local,1:global) for PMDM%u: FL Total Pages %u (max=%u,grow size=%u)", 6) \
X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \
X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \
X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \
X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \
X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \
X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \
-X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \
+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%u, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \
X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \
X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \
X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \
X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \
X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \
X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \
-X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \
+X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%u) Buffer provided", 1) \
X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \
X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \
X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \
X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \
X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \
X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \
-X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \
+X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %u type to be backed (ID=0x%08x)", 2) \
X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \
X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \
X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \
-X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \
+X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%u runlist head from Context 0x%08x to 0x%08x", 3) \
X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \
\
-X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \
+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%u int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \
X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
-X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \
-X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \
-X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %u), DM%u, pow flags: 0x%x", 3) \
+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %u %u %u %u", 4) \
+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \
X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
-X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \
-X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \
-X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \
+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %u, Units: 0x%08.8x", 2) \
+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %u (Power flags=%u)", 2) \
+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %u to %u", 2) \
X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \
X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \
X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \
X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \
-X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \
-X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \
+X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %u, Any RD-DM Active? %u", 2) \
+X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %u, TLA-DM Active? %u", 2) \
X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \
X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \
X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \
X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \
X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
-X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz", 2) \
-X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \
+X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %ums. Core clock: %u Hz", 2) \
+X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %u dusts powered.", 2) \
X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \
X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \
X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \
X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \
X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \
X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \
-X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \
-X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \
+X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %u, Ticks: %u", 2) \
+X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %u due to BRN59042.", 1) \
X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \
X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \
X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \
X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \
X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \
-X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \
+X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %u (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \
X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \
X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \
-X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \
+X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %u), pow flags: 0x%x", 2) \
X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \
X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \
X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \
X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \
X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \
X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \
-X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \
+X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "Power counters in raw/validation mode.", 0) \
X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \
-X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \
-X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \
+X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%u last request so retrying.", 1) \
+X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %u, Units: 0x%08x%08x", 3) \
X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \
X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \
-X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? %d", 2) \
+X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %u, RAC Active? %u", 2) \
X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \
+X( 74, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEFER_REACTIVE_UPDATE, "Proactive DVFS: Defer reactive update to meet next deadline 0x%08x%08x", 2) \
\
-X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \
-X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
+X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%u, FWCtx: 0x%08.8x", 2) \
+X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%u, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \
X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \
X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \
-X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \
-X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
-X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \
-X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \
-X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
-X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \
-X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \
-X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \
+X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%u FWCtx: 0x%08.8x", 2) \
+X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \
+X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%u->%u), PER-DM(0x%08x)", 3) \
+X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%u), PER-DM(0x%08x->0x%08x)", 3) \
+X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \
+X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%u->%u), PER-DM(0x%08x->0x%08x)", 4) \
+X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%u->%u), PER-DM(0x%08x)", 3) \
+X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%u->%u), PER-DM(0x%08x)", 4) \
X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \
X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \
-X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \
-X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \
-X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \
-X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \
-X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \
-X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \
-X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \
-X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \
+X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %u), FWCtx 0x%08x @ %u", 6) \
+X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %u, RD powered off: %u", 2) \
+X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %u, global (0x%08.8x): %u", 5) \
+X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %u - local (0x%08.8x): s%u?=c%u, global (0x%08.8x): s%u?=c%u", 8) \
+X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%u", 2) \
+X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %u, RTD 0x%08x.", 3) \
+X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%u, extmem: %u)", 2) \
+X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %u bytes)", 4) \
X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \
X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \
X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \
X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \
X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \
X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \
-X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \
+X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%u, poll failures: 0x%08x)", 2) \
X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \
X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \
X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \
X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \
X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \
X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \
-X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \
+X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 3) \
X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \
X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \
-X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \
-X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \
-X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \
-X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \
+X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%u)", 2) \
+X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%u complete", 1) \
+X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global,2:mmu) on HW context %u", 4) \
+X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%u failed", 1) \
X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \
X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u", 2) \
X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \
-X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \
-X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \
-X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \
-X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \
+X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%u's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \
+X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%u)", 1) \
+X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%u)", 1) \
+X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for Driver ID %u due to pending freelist reconstruction", 2) \
X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \
-X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \
+X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) on HW context %u", 4) \
X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \
X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \
-X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \
-X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \
-X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \
+X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%u)", 2) \
+X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %u bytes)", 4) \
+X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%u) type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 5) \
X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \
-X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \
-X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \
+X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%u)", 1) \
+X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %u ticks and deadline is %u ticks", 3) \
X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \
-X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \
+X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%u) Driver ID: %u type: %u (0:local,1:global) phase: %u (0:TA, 1:3D) on HW context %u", 6) \
X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \
X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \
-X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \
+X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %u RGX_CR_EVENT_STATUS=0x%08x", 2) \
X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \
-X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \
-X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \
-X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \
-X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \
+X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %u MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \
+X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x)", 3) \
+X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %u MMU Status: 0x%08x%08x 0x%08x)", 4) \
+X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%u of %u, loop:%u, poll failures: 0x%08x)", 4) \
X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
-X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \
+X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, DM%u is HWRNeeded=%u USCSlotsUsedByDM=%u", 4) \
X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \
X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \
-X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \
-X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \
-X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \
-X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \
-X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \
-X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \
-X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \
+X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %u with value 0x%08x", 2) \
+X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %u with value 0x%08x", 2) \
+X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %u with value 0x%08x", 2) \
+X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %u with value 0x%08x", 2) \
+X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %u with value 0x%08x", 2) \
+X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %u with value 0x%08x", 2) \
+X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%u_PFS poll failed on core %u with value 0x%08x", 3) \
X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \
X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \
\
X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \
X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \
X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \
-X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \
-X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded", 2) \
-X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \
+X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %u", 1) \
+X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %u is not allowed. Use only blocks lower than %u. The package will be discarded", 2) \
+X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %u counters IDs while the upper limit is %u", 2) \
X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \
X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \
-X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \
+X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%u)", 1) \
X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \
-X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \
-X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \
+X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %u (Roff = %u, Woff = %u)", 3) \
+X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %u", 1) \
X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \
X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \
X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \
X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \
-X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \
+X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %u", 1) \
X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \
X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \
X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \
*/
#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (IMG_UINT32_C(2))
#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT
-#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL)
+#define RGX_FIRMWARE_RAW_HEAP_BASE (IMG_UINT64_C(0xE1C0000000))
#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT)
/* To enable the firmware to compute the exact address of structures allocated by the KM
RGX_FIRMWARE_RAW_HEAP_SIZE - \
RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+
+/* 1 MB can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers:
+ * MAX(RAW_HEAP_SIZE) = 32 MB; MAX(NUMBER_OS) = 8; Total shared memory = 256 MB;
+ * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */
+#define RGX_FIRMWARE_MAX_PAGETABLE_SIZE (1 * 1024 * 1024)
+
/*
* The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and
* the minimum is 4MiB (1<<22); the default firmware heap size is set to
#define RGX_HWPERF_M_CORE_SHIFT 20U
/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
#define RGX_HWPERF_OSID_SHIFT 24U
-typedef enum {
- RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */
- RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */
- RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
- RGX_HWPERF_STREAM_ID_LAST,
-} RGX_HWPERF_STREAM_ID;
+
+/*! Origin or source of the event */
+typedef IMG_UINT32 RGX_HWPERF_STREAM_ID;
+/*! Events from the Firmware/GPU */
+#define RGX_HWPERF_STREAM_ID0_FW 0U
+/*! Events from the Server host driver component */
+#define RGX_HWPERF_STREAM_ID1_HOST 1U
+/*! Events from the Client host driver component */
+#define RGX_HWPERF_STREAM_ID2_CLIENT 2U
+#define RGX_HWPERF_STREAM_ID_LAST 3U
/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U)
#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U)
-
-#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META)
-#error "SECURE_FW_CODE_OSID is not supported on META cores"
-#endif
-
-
/******************************************************************************
* RGX FW Bootloader defaults
******************************************************************************/
#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
-
-#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2)
-#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID)
-#elif defined(SECURE_FW_CODE_OSID)
-#define MIPS_FW_CODE_OSID (1U)
-#endif
-
-
/*
* Pages to trampoline problematic physical addresses:
* - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a))
+#define RGXMIPSFW_C0_PAGEMASK_4K (0x00001800)
+#define RGXMIPSFW_C0_PAGEMASK_16K (0x00007800)
+#define RGXMIPSFW_C0_PAGEMASK_64K (0x0001F800)
+#define RGXMIPSFW_C0_PAGEMASK_256K (0x0007F800)
+#define RGXMIPSFW_C0_PAGEMASK_1MB (0x001FF800)
+#define RGXMIPSFW_C0_PAGEMASK_4MB (0x007FF800)
+
+#if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT)
+/* GPU_COUNT: number of physical cores in the system
+ * NUM_OF_REGBANKS = GPU_COUNT + 1 //XPU BROADCAST BANK
+ * RGXMIPSFW_REGISTERS_PAGE_SIZE = NUM_OF_REGBANKS * REGBANK_SIZE(64KB) * NUM_OF_OSID(8)
+ * For RGXMIPSFW_REGISTERS_PAGE_SIZE = 4MB, NUM_OF_REGBANKS = 8, so it supports up to GPU_COUNT = 7 cores
+ */
+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_4MB)
+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_4MB)
+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB)
+#elif (RGX_NUM_DRIVERS_SUPPORTED == 1)
+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_64K)
+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_64K)
+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB)
+#elif (RGX_NUM_DRIVERS_SUPPORTED <= 4)
+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_256K)
+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_256K)
+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB)
+#elif (RGX_NUM_DRIVERS_SUPPORTED <= 8)
+#define RGXMIPSFW_C0_PAGEMASK_REGISTERS (RGXMIPSFW_C0_PAGEMASK_1MB)
+#define RGXMIPSFW_REGISTERS_PAGE_SIZE (RGXMIPSFW_PAGE_SIZE_1MB)
+#define RGXMIPSFW_REGISTERS_REMAP_RANGE_CONFIG_REGION_SIZE (RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB)
+#else
+#error "MIPS TLB invalid params"
+#endif
+
+#define RGXMIPSFW_DECODE_REMAP_CONFIG_REGION_SIZE(r) ((1U << (((r >> 7) + 1U) << 1U))*0x400)
+
/*
* Firmware virtual layout and remap configuration
*/
#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000)
#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000)
#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+#define RGXMIPSFW_MIPS_STATE_VIRTUAL_BASE (RGXMIPSFW_REGISTERS_VIRTUAL_BASE + RGXMIPSFW_REGISTERS_PAGE_SIZE)
+/* Offset inside the bootloader data page where the general_exception handler saves the error state.
+ * The error value is then copied by the NMI handler to the MipsState struct in shared memory.
+ * This is done because it's difficult to obtain the address of MipsState inside the general
+ * exception handler. */
+#define RGXMIPSFW_ERROR_STATE_BASE (0x100)
/*
* Bootloader configuration data
* within the bootloader/NMI data page */
#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0U)
-
-/*
- * NMI shared data
- */
-/* Base address of the shared data within the bootloader/NMI data page */
-#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100)
-/* Size used by Debug dump data */
-#define RGXMIPSFW_NMI_SHARED_SIZE (0x2B0)
-/* Offsets in the NMI shared area in 32-bit words */
-#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
-#define RGXMIPSFW_NMI_STATE_OFFSET (0x1)
-#define RGXMIPSFW_NMI_ERROR_STATE_SET (0x1)
-
/*
* MIPS boot stage
*/
typedef struct {
IMG_UINT32 ui32ErrorState; /* This must come first in the structure */
+ IMG_UINT32 ui32Sync;
IMG_UINT32 ui32ErrorEPC;
IMG_UINT32 ui32StatusRegister;
IMG_UINT32 ui32CauseRegister;
IMG_UINT32 ui32BadInstr;
IMG_UINT32 ui32UnmappedAddress;
RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
- RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES];
+ IMG_UINT64 aui64Remap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES];
} RGX_MIPS_STATE;
+static_assert(offsetof(RGX_MIPS_STATE, ui32ErrorState) == 0,
+ "ui32ErrorState is not the first member of the RGX_MIPS_STATE struct");
+
+#if defined(SUPPORT_MIPS_64K_PAGE_SIZE)
+static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_64K,
+ "Register page size must be greater or equal to MIPS page size");
+#else
+static_assert(RGXMIPSFW_REGISTERS_PAGE_SIZE >= RGXMIPSFW_PAGE_SIZE_4K,
+ "Register page size must be greater or equal to MIPS page size");
+#endif
+
+
#endif /* RGXMIPSFW_ASSEMBLY_CODE */
#endif /* RGX_MIPS_H */
#define RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT (2U)
/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */
-typedef enum
-{
- /* No error. */
- RISCV_ABSTRACT_CMD_NO_ERROR = 0,
-
- /*
- * An abstract command was executing while command, abstractcs, or abstractauto
- * was written, or when one of the data or progbuf registers was read or
- * written. This status is only written if cmderr contains 0.
- */
- RISCV_ABSTRACT_CMD_BUSY = 1,
-
- /*
- * The requested command is not supported, regardless of whether
- * the hart is running or not.
- */
- RISCV_ABSTRACT_CMD_NOT_SUPPORTED = 2,
-
- /*
- * An exception occurred while executing the command
- * (e.g. while executing the Program Buffer).
- */
- RISCV_ABSTRACT_CMD_EXCEPTION = 3,
-
- /*
- * The abstract command couldn't execute because the hart wasn't in the required
- * state (running/halted), or unavailable.
- */
- RISCV_ABSTRACT_CMD_HALT_RESUME = 4,
-
- /*
- * The abstract command failed due to a bus error
- * (e.g. alignment, access size, or timeout).
- */
- RISCV_ABSTRACT_CMD_BUS_ERROR = 5,
-
- /* The command failed for another reason. */
- RISCV_ABSTRACT_CMD_OTHER_ERROR = 7
-
-} RGXRISCVFW_ABSTRACT_CMD_ERR;
+typedef IMG_UINT32 RGXRISCVFW_ABSTRACT_CMD_ERR;
+
+/* No error. */
+#define RISCV_ABSTRACT_CMD_NO_ERROR 0U
+
+/*
+ * An abstract command was executing while command, abstractcs, or abstractauto
+ * was written, or when one of the data or progbuf registers was read or
+ * written. This status is only written if cmderr contains 0.
+ */
+#define RISCV_ABSTRACT_CMD_BUSY 1U
+
+/*
+ * The requested command is not supported, regardless of whether
+ * the hart is running or not.
+ */
+#define RISCV_ABSTRACT_CMD_NOT_SUPPORTED 2U
+
+/*
+ * An exception occurred while executing the command
+ * (e.g. while executing the Program Buffer).
+ */
+#define RISCV_ABSTRACT_CMD_EXCEPTION 3U
+
+/*
+ * The abstract command couldn't execute because the hart wasn't in the required
+ * state (running/halted), or unavailable.
+ */
+#define RISCV_ABSTRACT_CMD_HALT_RESUME 4U
+
+/*
+ * The abstract command failed due to a bus error
+ * (e.g. alignment, access size, or timeout).
+ */
+#define RISCV_ABSTRACT_CMD_BUS_ERROR 5U
+
+/* The command failed for another reason. */
+#define RISCV_ABSTRACT_CMD_OTHER_ERROR 7U
+
/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */
-typedef enum
-{
- /* There was no bus error. */
- RISCV_SYSBUS_NO_ERROR = 0,
+typedef IMG_UINT32 RGXRISCVFW_SYSBUS_ERR;
+
+/* There was no bus error. */
+#define RISCV_SYSBUS_NO_ERROR 0U
- /* There was a timeout. */
- RISCV_SYSBUS_TIMEOUT = 1,
+/* There was a timeout. */
+#define RISCV_SYSBUS_TIMEOUT 1U
- /* A bad address was accessed. */
- RISCV_SYSBUS_BAD_ADDRESS = 2,
+/* A bad address was accessed. */
+#define RISCV_SYSBUS_BAD_ADDRESS 2U
- /* There was an alignment error. */
- RISCV_SYSBUS_BAD_ALIGNMENT = 3,
+/* There was an alignment error. */
+#define RISCV_SYSBUS_BAD_ALIGNMENT 3U
- /* An access of unsupported size was requested. */
- RISCV_SYSBUS_UNSUPPORTED_SIZE = 4,
+/* An access of unsupported size was requested. */
+#define RISCV_SYSBUS_UNSUPPORTED_SIZE 4U
- /* Other. */
- RISCV_SYSBUS_OTHER_ERROR = 7
+/* Other. */
+#define RISCV_SYSBUS_OTHER_ERROR 7U
-} RGXRISCVFW_SYSBUS_ERR;
#endif /* RGXRISCVFW_ASSEMBLY_CODE */
#define RGXFW_ALIGN_CHECKS_UM_MAX 128U
#define RGXFW_ALIGN_CHECKS_INIT0 \
- sizeof(RGXFWIF_TRACEBUF), \
+ (IMG_UINT32)sizeof(RGXFWIF_TRACEBUF), \
offsetof(RGXFWIF_TRACEBUF, ui32LogType), \
offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \
offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \
offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \
\
- sizeof(RGXFWIF_SYSDATA), \
+ (IMG_UINT32)sizeof(RGXFWIF_SYSDATA), \
offsetof(RGXFWIF_SYSDATA, ePowState), \
offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \
offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \
offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \
offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \
\
- sizeof(RGXFWIF_OSDATA), \
+ (IMG_UINT32)sizeof(RGXFWIF_OSDATA), \
offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \
offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \
\
- sizeof(RGXFWIF_HWRINFOBUF), \
+ (IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF), \
offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \
offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \
offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \
offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \
\
/* RGXFWIF_CMDTA checks */ \
- sizeof(RGXFWIF_CMDTA), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMDTA), \
offsetof(RGXFWIF_CMDTA, sGeomRegs), \
\
/* RGXFWIF_CMD3D checks */ \
- sizeof(RGXFWIF_CMD3D), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMD3D), \
offsetof(RGXFWIF_CMD3D, s3DRegs), \
\
/* RGXFWIF_CMDTRANSFER checks */ \
- sizeof(RGXFWIF_CMDTRANSFER), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMDTRANSFER), \
offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \
\
\
/* RGXFWIF_CMD_COMPUTE checks */ \
- sizeof(RGXFWIF_CMD_COMPUTE), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMD_COMPUTE), \
offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \
\
/* RGXFWIF_FREELIST checks */ \
- sizeof(RGXFWIF_FREELIST), \
+ (IMG_UINT32)sizeof(RGXFWIF_FREELIST), \
offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \
offsetof(RGXFWIF_FREELIST, ui32MaxPages), \
offsetof(RGXFWIF_FREELIST, ui32CurrentPages), \
\
/* RGXFWIF_HWRTDATA checks */ \
- sizeof(RGXFWIF_HWRTDATA), \
+ (IMG_UINT32)sizeof(RGXFWIF_HWRTDATA), \
offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), \
offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \
offsetof(RGXFWIF_HWRTDATA, apsFreeLists), \
offsetof(RGXFWIF_HWRTDATA, eState), \
\
/* RGXFWIF_HWRTDATA_COMMON checks */ \
- sizeof(RGXFWIF_HWRTDATA_COMMON), \
+ (IMG_UINT32)sizeof(RGXFWIF_HWRTDATA_COMMON), \
offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\
\
/* RGXFWIF_HWPERF_CTL_BLK checks */ \
- sizeof(RGXFWIF_HWPERF_CTL_BLK), \
+ (IMG_UINT32)sizeof(RGXFWIF_HWPERF_CTL_BLK), \
offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \
\
/* RGXFWIF_HWPERF_CTL checks */ \
- sizeof(RGXFWIF_HWPERF_CTL), \
+ (IMG_UINT32)sizeof(RGXFWIF_HWPERF_CTL), \
offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
#if defined(RGX_FEATURE_TLA)
#define RGXFW_ALIGN_CHECKS_INIT1 \
RGXFW_ALIGN_CHECKS_INIT0, \
/* RGXFWIF_CMD2D checks */ \
- sizeof(RGXFWIF_CMD2D), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMD2D), \
offsetof(RGXFWIF_CMD2D, s2DRegs)
#else
#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0
#define RGXFW_ALIGN_CHECKS_INIT \
RGXFW_ALIGN_CHECKS_INIT1, \
/* RGXFWIF_CMDTDM checks */ \
- sizeof(RGXFWIF_CMDTDM), \
+ (IMG_UINT32)sizeof(RGXFWIF_CMDTDM), \
offsetof(RGXFWIF_CMDTDM, sTDMRegs)
#else
#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT1
*****************************************************************************/
#define RGXFW_ALIGN_CHECKS_INIT_KM \
- sizeof(RGXFWIF_SYSINIT), \
+ (IMG_UINT32)sizeof(RGXFWIF_SYSINIT), \
offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \
offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \
offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \
offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \
offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \
offsetof(RGXFWIF_SYSINIT, sFwSysData), \
- sizeof(RGXFWIF_OSINIT), \
+ (IMG_UINT32)sizeof(RGXFWIF_OSINIT), \
offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \
offsetof(RGXFWIF_OSINIT, psKernelCCB), \
offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \
offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \
\
/* RGXFWIF_FWRENDERCONTEXT checks */ \
- sizeof(RGXFWIF_FWRENDERCONTEXT), \
+ (IMG_UINT32)sizeof(RGXFWIF_FWRENDERCONTEXT), \
offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \
offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \
\
- sizeof(RGXFWIF_FWCOMMONCONTEXT), \
+ (IMG_UINT32)sizeof(RGXFWIF_FWCOMMONCONTEXT), \
offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \
offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \
offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \
\
- sizeof(RGXFWIF_MMUCACHEDATA), \
+ (IMG_UINT32)sizeof(RGXFWIF_MMUCACHEDATA), \
offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \
offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \
offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue)
* and the IRQ context when applying a configuration request. */
typedef struct
{
- IMG_BOOL bValid;
- IMG_BOOL bEnabled;
+ /* A few members could be booleans but are padded to IMG_UINT32
+ * to work around pdump alignment requirements */
+ IMG_UINT32 ui32Valid;
+ IMG_UINT32 ui32Enabled;
IMG_UINT32 eBlockID;
IMG_UINT32 uiCounterMask;
IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX];
/* Structure used to hold a Direct-Addressable block's parameters for passing
* between the BG context and the IRQ context when applying a configuration
- * request. RGX_FEATURE_HWPERF_OCEANIC use only.
+ * request. For use with HWPERF_UNIFIED only.
*/
typedef struct
{
return &psHWPerfInitData->sBlkCfg[ui32Idx];
}
-/* Stub routine for rgxfw_hwperf_get_da_block_ctl() for non
- * RGX_FEATURE_HWPERF_OCEANIC systems. Just return a NULL.
+/* Stub routine for rgxfw_hwperf_get_da_block_ctl(). Just returns NULL.
*/
#ifdef INLINE_IS_PRAGMA
#pragma inline(rgxfw_hwperf_get_da_block_ctl)
#include "img_types.h"
#include "rgx_fwif_shared.h"
-#include "km/rgxdefs_km.h"
+#include "rgxdefs_km.h"
#include "dllist.h"
#include "rgx_hwperf.h"
+#include "rgx_mips.h"
+#include "rgxheapconfig.h"
/*************************************************************************/ /*!
* Trace Buffer
*****************************************************************************/
-/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */
-#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U
+/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */
+#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */
+#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */
+#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */
+
#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U
#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
#define RGXFW_THREAD_NUM 2U
#define RGXFW_POLL_TYPE_SET 0x80000000U
+#define RGXFW_PROCESS_NAME_LEN (16)
+
typedef struct
{
IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
IMG_UINT32 ui32LineNum;
} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_FILE_INFO_BUF) == 408,
+ "RGXFWIF_FILE_INFO_BUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface
* @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing
*/
typedef struct
{
- IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer)*/
+ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */
+ IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */
#if defined(RGX_FIRMWARE)
- IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */
+ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */
#else
- RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/
+ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/
#endif
- IMG_PUINT32 pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */
+ IMG_PUINT32 RGXFW_ALIGN pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */
- RGXFWIF_FILE_INFO_BUF sAssertBuf;
+ RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf;
} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
/*! @} End of Defgroup SRVAndFWTracing */
IMG_UINT bfOsState : 3;
IMG_UINT bfFLOk : 1;
IMG_UINT bfFLGrowPending : 1;
- IMG_UINT bfIsolatedOS : 1;
- IMG_UINT bfReserved : 26;
+ IMG_UINT bfReserved : 27;
} RGXFWIF_OS_RUNTIME_FLAGS;
typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY;
#endif
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+#define MAX_THREAD_NUM 2
+
+static_assert(RGXFW_THREAD_NUM <= MAX_THREAD_NUM,
+ "RGXFW_THREAD_NUM is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @InGroup SRVAndFWTracing
* @Brief Firmware trace control data
typedef struct
{
IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[MAX_THREAD_NUM]; /*!< FW Trace buffer */
+#else
RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */
+#endif
IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated
(in RGXTraceBufferInitOnDemandResources) */
IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */
} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_TRACEBUF) == 880,
+ "RGXFWIF_TRACEBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*! @Brief Firmware system data shared with the Host driver */
typedef struct
{
IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */
IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */
IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */
- RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */
+ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OSIDS];/*!< State flags for each Operating System mirrored from Fw coremem */
RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */
IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ IMG_UINT32 aui32CrPollAddr[MAX_THREAD_NUM]; /*!< Failed poll address */
+ IMG_UINT32 aui32CrPollMask[MAX_THREAD_NUM]; /*!< Failed poll mask */
+ IMG_UINT32 aui32CrPollCount[MAX_THREAD_NUM]; /*!< Failed poll count */
+#else
IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */
IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */
IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */
- IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime;
-#if defined(SUPPORT_POWMON_COMPONENT)
-#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
- RGXFWIF_TRACEBUF_SPACE sPowerMonBuf;
- IMG_UINT32 ui32PowerMonBufSizeInDWords;
-#endif
#endif
+ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime;
-#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8)
#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE)
IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX];
IMG_UINT32 ui32McConfig; /*!< Identify whether MC config is P-P or P-S */
} UNCACHED_ALIGN RGXFWIF_SYSDATA;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_SYSDATA) == 3624,
+ "RGXFWIF_SYSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER == 3624");
+#endif
+
+
/*!
* @InGroup ContextSwitching
* @Brief Firmware per-os data and configuration
IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */
IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */
IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */
-#if defined(PVRSRV_STALLED_CCB_ACTION)
+#if defined(PVRSRV_STALLED_CCB_ACTION) || defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32ForcedUpdatesRequested;
IMG_UINT8 ui8SLRLogWp;
RGXFWIF_SLR_ENTRY sSLRLogFirst;
RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES];
IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime;
#endif
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ volatile IMG_UINT32 aui32InterruptCount[MAX_THREAD_NUM]; /*!< Interrupt count from Threads > */
+#else
volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */
+#endif
IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */
RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */
IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */
} UNCACHED_ALIGN RGXFWIF_OSDATA;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_OSDATA) == 584,
+ "RGXFWIF_OSDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/* Firmware trace time-stamp field breakup */
/* RGX_CR_TIMER register read (48 bits) value*/
IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */
IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */
IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */
- IMG_UINT64 RGXFW_ALIGN ui64Reserved[2];
+ IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */
} UNCACHED_ALIGN RGX_HWRINFO;
#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */
IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */
} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_HWRINFOBUF) == 2336,
+ "RGXFWIF_HWRINFOBUF is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*! @} End of HWRInfo */
#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1))
#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1))
#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2))
-#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1))
-#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2))
+#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1))
+#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2))
/*!
******************************************************************************
* RGX firmware Init Config Data
#define RGX_ACTIVEPM_FORCE_ON 1U
#define RGX_ACTIVEPM_DEFAULT 2U
-typedef enum
-{
- RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
- RGX_RD_POWER_ISLAND_FORCE_ON = 1,
- RGX_RD_POWER_ISLAND_DEFAULT = 2
-} RGX_RD_POWER_ISLAND_CONF;
+typedef IMG_UINT32 RGX_RD_POWER_ISLAND_CONF;
+#define RGX_RD_POWER_ISLAND_FORCE_OFF 0U
+#define RGX_RD_POWER_ISLAND_FORCE_ON 1U
+#define RGX_RD_POWER_ISLAND_DEFAULT 2U
#if defined(RGX_FW_IRQ_OS_COUNTERS)
/* Unused registers re-purposed for storing counters of the Firmware's
typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+/*!
+ * @Brief Buffer to store KM active client contexts
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+} RGXFWIF_ACTIVE_CONTEXT_BUF_DATA;
+#endif
+
/*!
* This number is used to represent an invalid page catalogue physical address
*/
#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
/*!
- * This number is used to represent unallocated page catalog base register
+ * This number is used to represent an unallocated set of page catalog base registers
*/
#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU
/*!
- Firmware memory context.
-*/
+ * Firmware memory context.
+ */
typedef struct
{
IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32OSid;
IMG_BOOL bOSidAxiProt;
#endif
} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_FWMEMCONTEXT) == 32,
+ "RGXFWIF_FWMEMCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* FW context state flags
*/
/*!
* @InGroup ContextSwitching
- * @Brief Firmware GEOM/TA context suspend state
+ * @Brief Firmware GEOM/TA context suspend state (per GEOM core)
*/
typedef struct
{
IMG_UINT16 ui16TACurrentIdx;
} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+#define MAX_GEOM_CORE_SIZE 4
+
+static_assert(RGX_NUM_GEOM_CORES <= MAX_GEOM_CORE_SIZE,
+ "RGX_NUM_GEOM_CORES is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware GEOM/TA context suspend states for all GEOM cores
+ */
typedef struct
{
- /* FW-accessible TA state which must be written out to memory on context store */
+ /*! FW-accessible TA state which must be written out to memory on context store */
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[MAX_GEOM_CORE_SIZE];
+#else
RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES];
+#endif
} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_TACTX_STATE) == 160,
+ "RGXFWIF_TACTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @InGroup ContextSwitching
* @Brief Firmware FRAG/3D context suspend state
static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U,
"Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size.");
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_3DCTX_STATE) == 16,
+ "RGXFWIF_3DCTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
#define RGXFWIF_CTX_USING_BUFFER_A (0)
#define RGXFWIF_CTX_USING_BUFFER_B (1U)
IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */
} RGXFWIF_COMPUTECTX_STATE;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_COMPUTECTX_STATE) == 4,
+ "RGXFWIF_COMPUTECTX_STATE is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
+#define RGXFWIF_CONTEXT_MISC_FLAGS_STATS_PENDING (1U << 0)
+#define RGXFWIF_CONTEXT_MISC_FLAGS_HAS_DEFER_COUNT (1U << 1)
+
/*!
* @InGroup WorkloadContexts
* @Brief Firmware Common Context (or FWCC)
/* Framework state */
PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */
+ /* Misc and compatibility flags */
+ IMG_UINT32 ui32MiscFlags;
+
/* Statistic updates waiting to be passed back to the host... */
- IMG_BOOL bStatsPending; /*!< True when some stats are pending */
IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */
IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */
IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */
IMG_UINT32 ui32PID; /*!< associated process ID */
IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */
+ IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */
+ IMG_UINT32 ui32DeferCount; /*!< Number of context defers before forced scheduling of context */
} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U,
"Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size.");
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) == 168,
+ "RGXFWIF_FWCOMMONCONTEXT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1];
typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2];
typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4];
IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */
#if defined(SUPPORT_TRP)
- RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D;
- RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom;
+ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; /*!< Used by Firmware to store checksums during 3D WRR */
+ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; /*!< Used by Firmware to store checksums during TA WRR */
+ RGXFWIF_DM eTRPGeomCoreAffinity; /*!< The DM affinity for a pending 2nd TRP pass of GEOM; otherwise set to RGXFWIF_DM_MAX. */
#endif
} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */
IMG_UINT32 ui32WGPState;
- IMG_UINT32 ui32WGPChecksum;
- IMG_UINT32 ui32CoreMaskA;
- IMG_UINT32 ui32CoreMaskB;
+ IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES];
} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT;
/*!
RGXFWIF_FWCOMMONCONTEXT sTQContext; /*!< Firmware context for TQ3D */
#if defined(SUPPORT_TRP)
- IMG_UINT32 ui32TRPState;
- RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ;
+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */
+ RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ;/*!< Used by Firmware to store checksums during TQ WRR */
#endif
} UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT;
IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */
} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_CCB_CTL) == 16,
+ "RGXFWIF_CCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @Defgroup KCCBTypes Kernel CCB data interface
* @Brief Types grouping data structures and defines used in realising the KCCB functionality
#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */
-#if !defined(__KERNEL)
+#if !defined(__KERNEL__)
#if !defined(RGX_FEATURE_SLC_VIVT)
#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE < 2)
#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */
-#else
-#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB)
-#endif
#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */
#else /* RGX_FEATURE_SLC_VIVT */
-#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */
-#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */
-#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0U) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0U) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
#endif
#else
-#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
-#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
-#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
#endif
#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */
typedef struct
{
- PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
- IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */
- IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */
- IMG_UINT32 ui32BPDM; /*!< Breakpoint control */
- IMG_UINT32 ui32BPDataFlags;
- IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */
- IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */
- RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */
+ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */
+ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */
+ IMG_UINT32 ui32BPDataFlags;
+ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */
+ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */
+ IMG_UINT64 RGXFW_ALIGN ui64SpillAddr;
+ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */
} RGXFWIF_BPDATA;
#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */
IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */
PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+#endif
} RGXFWIF_KCCB_CMD_KICK_DATA;
/*!
*/
typedef struct
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
RGXFWIF_OS_STATE_CHANGE eNewOSState;
} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */
RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */
RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED,
- RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */
- RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
- RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */
- RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
- RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
- RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
- RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
- /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */
- RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
- RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
- RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
-
- RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */
- RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */
+ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */
+ RGXFWIF_KCCB_CMD_CLEANUP = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
+ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
+ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
+ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
+ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */
+ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */
/* Commands only permitted to the native or host OS */
- RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED,
- RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
- /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS */
- RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
- RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */
- /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT*/
- RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
- RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */
- RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
- RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
- /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */
- /*RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */
- RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */
- RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */
+ RGXFWIF_KCCB_CMD_POW = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */
+ RGXFWIF_KCCB_CMD_REGCONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */
+ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */
+ RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the priority/group for a particular driver. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */
+ RGXFWIF_KCCB_CMD_PHR_CFG = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */
#if defined(SUPPORT_VALIDATION)
- RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */
+ RGXFWIF_KCCB_CMD_RGXREG = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */
#endif
- RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */
- RGXFWIF_KCCB_CMD_COUNTER_DUMP = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
- RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
- RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */
+ RGXFWIF_KCCB_CMD_WDG_CFG = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */
+ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
#if defined(SUPPORT_VALIDATION)
- RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */
+ RGXFWIF_KCCB_CMD_GPUMAP = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */
#endif
- RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */
+
+ /* HWPerf commands */
+ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 300U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 301U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */
+ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 302U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 303U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
+ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 304U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */
+
} RGXFWIF_KCCB_CMD_TYPE;
-#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1)
+#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_POW - 1)
/*! @Brief Kernel CCB command packet */
typedef struct
RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */
RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */
RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */
-#if defined(SUPPORT_VALIDATION)
+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */
RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */
#endif
RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_KCCB_CMD) == 64,
+ "RGXFWIF_KCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*! @} End of KCCBTypes */
/*!
RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange;
RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for context reset notification */
-#if defined(SUPPORT_VALIDATION)
+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData;
#if defined(SUPPORT_SOC_TIMER)
RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers;
RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_FWCCB_CMD) == 48,
+ "RGXFWIF_FWCCB_CMD is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*! @} End of FWCCBTypes */
/*!
#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64)
#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15)
-#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1U)) & ~(RGXFWIF_FWALLOC_ALIGN - 1U))
+#define RGX_CCB_FWALLOC_ALIGN(size) (PVR_ALIGN(size, RGXFWIF_FWALLOC_ALIGN))
typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE;
#endif
#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */
+#define RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP (223U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Process a vulkan timestamp */
/*! @} End of Client CCB command types */
typedef struct
IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */
IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+#endif
} RGXFWIF_CCB_CMD_HEADER;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_CCB_CMD_HEADER) == 16,
+ "RGXFWIF_CCB_CMD_HEADER is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*
******************************************************************************
* Client CCB commands which are only required by the kernel
IMG_BOOL bUpdated; /*!< Information is valid */
} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
-/*!
- ******************************************************************************
- * Updated configuration post FW data init.
- *****************************************************************************/
+/*! @Brief Firmware Runtime configuration data \ref RGXFWIF_RUNTIME_CFG
+ * allocated by services and used by the Firmware on boot
+ **/
typedef struct
{
- IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */
- IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */
- IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */
- IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */
- IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */
- IMG_UINT32 ui32PHRMode; /* Periodic Hardware Reset configuration values */
- IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */
- IMG_UINT32 ui32WdgPeriodUs; /* The watchdog period in microseconds */
- IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */
- PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+ IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */
+ IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */
+ IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */
+ IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */
+ IMG_UINT32 ui32DefaultDustsNumInit; /*!< Last number of dusts change requested by the host */
+ IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */
+ IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */
+ IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */
+ IMG_UINT32 aui32DriverPriority[RGXFW_MAX_NUM_OSIDS]; /*!< Array of priorities per OS */
+ IMG_UINT32 aui32DriverIsolationGroup[RGXFW_MAX_NUM_OSIDS]; /*!< Array of isolation groups per OS */
+
+ PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */
+#if defined(SUPPORT_VALIDATION) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ IMG_BOOL bInjectFWFault; /*!< Injecting firmware fault to validate recovery through Host */
+#endif
} RGXFWIF_RUNTIME_CFG;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_RUNTIME_CFG) == 68,
+ "RGXFWIF_RUNTIME_CFG is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
*****************************************************************************
* Control data for RGX
typedef struct
{
IMG_PID uiPID;
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
typedef struct
RGXFWIF_GPIO_VAL_LAST
} RGXFWIF_GPIO_VAL_MODE;
-typedef enum
-{
- FW_PERF_CONF_NONE = 0,
- FW_PERF_CONF_ICACHE = 1,
- FW_PERF_CONF_DCACHE = 2,
- FW_PERF_CONF_JTLB_INSTR = 5,
- FW_PERF_CONF_INSTRUCTIONS = 6
-} FW_PERF_CONF;
+typedef IMG_UINT32 FW_PERF_CONF;
+#define FW_PERF_CONF_NONE 0U
+#define FW_PERF_CONF_ICACHE 1U
+#define FW_PERF_CONF_DCACHE 2U
+#define FW_PERF_CONF_JTLB_INSTR 5U
+#define FW_PERF_CONF_INSTRUCTIONS 6U
typedef enum
{
* @} End of AddToGroup KCCBTypes
*/
+/*! @Brief OS connection data \ref RGXFWIF_CONNECTION_CTL allocated
+ * by services and used to track OS state in Firmware and Services
+ **/
typedef struct
{
/* Fw-Os connection states */
- volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState;
- volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState;
- volatile IMG_UINT32 ui32AliveFwToken;
- volatile IMG_UINT32 ui32AliveOsToken;
+ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; /*!< Firmware-OS connection state */
+ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; /*!< Services-OS connection state */
+ volatile IMG_UINT32 ui32AliveFwToken; /*!< OS Alive token updated by Firmware */
+ volatile IMG_UINT32 ui32AliveOsToken; /*!< OS Alive token updated by Services */
} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL;
/*! @Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT
} UNCACHED_ALIGN RGXFWIF_OSINIT;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_OSINIT) == 104,
+ "RGXFWIF_OSINIT is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT
* allocated by services and used by the Firmware on boot
**/
typedef struct
{
+ RGX_MIPS_STATE sMIPSState; /*!< MIPS Debug Data; this must be the first member in the structure */
+
IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */
IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */
RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer;
#endif
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ RGXFWIF_DEV_VIRTADDR sActiveContextBufBase; /*!< Active context buffer base */
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
/*
* Used when validation is enabled to allow the host to check
* that MTS sent the correct sideband in response to a kick
#if defined(SUPPORT_AUTOVZ)
IMG_UINT32 ui32VzWdgPeriod;
#endif
-
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ /* notify firmware power-up on host-side recovery */
+ IMG_BOOL bFwHostRecoveryMode;
+#endif
} UNCACHED_ALIGN RGXFWIF_SYSINIT;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+static_assert(offsetof(RGXFWIF_SYSINIT, sMIPSState) == 0,
+ "sMIPSState is not the first member of the RGXFWIF_SYSINIT struct");
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1
#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1
#endif
IMG_UINT32 ui32GpuUtilFlags;
/* Last GPU state + OS time of the last state update */
- IMG_UINT64 RGXFW_ALIGN ui64LastWord;
-
+ IMG_UINT64 RGXFW_ALIGN ui64GpuLastWord;
/* Counters for the amount of time the GPU was active/idle/blocked */
- IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+ IMG_UINT64 RGXFW_ALIGN aui64GpuStatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+
+ /* Last GPU DM per-OS states + OS time of the last state update */
+ IMG_UINT64 RGXFW_ALIGN aaui64DMOSLastWord[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS];
+ /* Counters for the amount of time the GPU DMs were active/idle/blocked */
+ IMG_UINT64 RGXFW_ALIGN aaaui64DMOSStatsCounters[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM];
} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_GPU_UTIL_FWCB) == 12584,
+ "RGXFWIF_GPU_UTIL_FWCB is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
typedef struct
{
IMG_UINT32 ui32RenderTargetIndex; //Render number
IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */
} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_RTA_CTL) == 32,
+ "RGXFWIF_RTA_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @InGroup RenderTarget
* @Brief Firmware Freelist holding usage state of the Parameter Buffers
IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */
IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */
IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/
-#if defined(SUPPORT_SHADOW_FREELISTS)
+#if defined(SUPPORT_SHADOW_FREELISTS) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32HWRCounter;
PRGXFWIF_FWMEMCONTEXT psFWMemContext;
#endif
IMG_BOOL bGrowPending; /*!< Freelist grow is pending */
IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */
IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */
-#if defined(SUPPORT_AGP)
+#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */
#endif
} UNCACHED_ALIGN RGXFWIF_FREELIST;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_FREELIST) == 64,
+ "RGXFWIF_FREELIST is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
******************************************************************************
* HWRTData
/* HWRTData flags */
/* Deprecated flags 1:0 */
-#define HWRTDATA_HAS_LAST_TA (1UL << 2)
-#define HWRTDATA_PARTIAL_RENDERED (1UL << 3)
-#define HWRTDATA_DISABLE_TILE_REORDERING (1UL << 4)
-#define HWRTDATA_NEED_BRN65101_BLIT (1UL << 5)
-#define HWRTDATA_FIRST_BRN65101_STRIP (1UL << 6)
-#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1UL << 7)
+#define HWRTDATA_HAS_LAST_TA (IMG_UINT32_C(1) << 2)
+#define HWRTDATA_PARTIAL_RENDERED (IMG_UINT32_C(1) << 3)
+#define HWRTDATA_DISABLE_TILE_REORDERING (IMG_UINT32_C(1) << 4)
+#define HWRTDATA_NEED_BRN65101_BLIT (IMG_UINT32_C(1) << 5)
+#define HWRTDATA_FIRST_BRN65101_STRIP (IMG_UINT32_C(1) << 6)
+#define HWRTDATA_NEED_BRN67182_2ND_RENDER (IMG_UINT32_C(1) << 7)
#if defined(SUPPORT_AGP)
-#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 8)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (IMG_UINT32_C(1) << 8)
#if defined(SUPPORT_AGP4)
-#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 9)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (IMG_UINT32_C(1) << 9)
#endif
-#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 10)
+#define HWRTDATA_GEOM_NEEDS_RESUME (IMG_UINT32_C(1) << 10)
#endif
typedef enum
IMG_UINT32 ui32ISPMtileSize;
} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_HWRTDATA_COMMON) == 88,
+ "RGXFWIF_HWRTDATA_COMMON is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+#define MAX_FREELISTS_SIZE 3
+
+static_assert(RGXFW_MAX_FREELISTS <= MAX_FREELISTS_SIZE,
+ "RGXFW_MAX_FREELISTS is outside of allowable range for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
+
/*!
* @InGroup RenderTarget
* @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context
{
IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
- IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; /*!< VCE Page Catalogue base */
- IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4];
- IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; /*!< TE Page Catalogue base */
- IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[1]; /*!< VCE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[1];
+ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[1]; /*!< TE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[1];
IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */
IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase;
IMG_UINT32 ui32HWRTDataFlags;
RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[MAX_FREELISTS_SIZE]; /*!< Freelist to use */
+ IMG_UINT32 aui32FreeListHWRSnapshot[MAX_FREELISTS_SIZE];
+#else
PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */
IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+#endif
IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */
- RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */
-
RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */
IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */
#else
RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost;
#endif
-#if defined(SUPPORT_TRP)
+#if defined(SUPPORT_TRP) && !defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32KickFlagsCopy;
- IMG_UINT32 ui32TRPState;
+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */
IMG_UINT32 ui32TEPageCopy;
IMG_UINT32 ui32VCEPageCopy;
#endif
-#if defined(SUPPORT_AGP)
+#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_BOOL bTACachesNeedZeroing;
#endif
-} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+ RGXFWIF_CLEANUP_CTL RGXFW_ALIGN_DCACHEL sCleanupState; /*!< Render target clean up state */
+} RGXFW_ALIGN_DCACHEL RGXFWIF_HWRTDATA;
+
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_HWRTDATA) == 256,
+ "RGXFWIF_HWRTDATA is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
/* Sync_checkpoint firmware object.
* This is the FW-addressable structure use to hold the sync checkpoint's
/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET
+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U
+
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U
+
+#define RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES)
+
+#define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES)
+#define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES)
+
+/*!
+ ******************************************************************************
+ * Virtualisation and Security
+ *****************************************************************************/
+#define FW_OSID (0U)
+#define MMU_CONTEXT_MAPPING_FWPRIV (0U) /* FW code/private data */
+
+#if defined(SECURE_FW_CODE_OSID)
+/* software workaround for SoCs without fw_code, fw_priv_data signals, MIPS only */
+#if defined(RGX_FEATURE_META)
+#error "SECURE_FW_CODE_OSID is not supported on META cores"
+#elif defined(RGX_FEATURE_RISCV_FW_PROCESSOR)
+#error "SECURE_FW_CODE_OSID is not supported on RISC-V cores"
+#elif (RGX_NUM_DRIVERS_SUPPORTED > 1)
+#error "SECURE_FW_CODE_OSID is not supported on virtualization drivers"
+#elif (SECURE_FW_CODE_OSID + 1 > 2)
+#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID)
+#else
+#define MIPS_FW_CODE_OSID (1U)
+#endif
+#endif /* defined(SECURE_FW_CODE_OSID) */
+
+static_assert((RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID),
+ " Invalid RGX_FW_HEAP_OSID_ASSIGNMENT value. Rogue cores support only the RGX_FW_HEAP_USES_FIRMWARE_OSID config");
+
+/* Firmware and Host driver share the same OSID */
+#define FW_HEAP_OSID (FW_OSID)
+
+#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID) || defined(RGX_FEATURE_MIPS)
+/* The Firmware accesses its private code & data and the interface
+ * memory it shares with the KM drivers using the same MMU context */
+#define MMU_CONTEXT_MAPPING_FWIF MMU_CONTEXT_MAPPING_FWPRIV
+#else
+/* The Firmware accesses the interface memory it shares
+ * with the KM drivers using a reserved MMU context */
+#define MMU_CONTEXT_MAPPING_FWIF (7U)
+#endif
+
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+/* virtualization without security support */
+#define DRIVER_ID(osid) (osid)
+#define OSID(did) (did)
+#else
+#define DRIVER_ID(osid) (0U)
+#define OSID(did) (did)
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
+
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+
+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++)
+
+#if defined(__KERNEL__)
+/* Driver implementation */
+#define FOREACH_ACTIVE_DRIVER(devinfo, did) FOREACH_SUPPORTED_DRIVER(did) \
+ { \
+ if (devinfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[did].bfOsState != RGXFW_CONNECTION_FW_ACTIVE) continue;
+
+#define END_FOREACH_ACTIVE_DRIVER }
+
+#else
+/* Firmware implementation */
+#define FOREACH_ACTIVE_DRIVER(did) do { \
+ unsigned int idx; \
+ for ((idx)=RGXFW_HOST_DRIVER_ID, (did)=gsRGXFWCtl.aui32ActiveDrivers[0U]; \
+ (idx) < RGXFW_NUM_ACTIVE_DRIVERS; \
+ ++(idx), (did)=gsRGXFWCtl.aui32ActiveDrivers[(idx)]) {
+
+#define END_FOREACH_ACTIVE_DRIVER }} while (false);
+#endif /* defined(__KERNEL__) */
+
+
+#else
+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) <= RGXFW_HOST_DRIVER_ID; (did)++)
+
+#define FOREACH_ACTIVE_DRIVER(did) FOREACH_SUPPORTED_DRIVER(did)
+#define END_FOREACH_ACTIVE_DRIVER
+
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
+
+#define FOREACH_VALIDATION_OSID(osid) for ((osid)=0; (osid) < GPUVIRT_VALIDATION_NUM_OS; (osid)++)
+#define FOREACH_HW_OSID(osid) for ((osid)=0; (osid) < RGXFW_MAX_NUM_OSIDS; (osid)++)
+#define FOREACH_DRIVER_RAW_HEAP(did) for ((did)=RGX_FIRST_RAW_HEAP_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++)
+
#endif /* RGX_FWIF_KM_H */
/******************************************************************************
#include "img_defs.h"
#include "rgx_common.h"
#include "powervr/mem_types.h"
+#include "devicemem_typedefs.h"
/* Indicates the number of RTDATAs per RTDATASET */
#if defined(SUPPORT_AGP)
IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */
} RGXFWIF_UFO;
+/*!
+ * @InGroup RenderTarget
+ * @Brief Track pending and executed workloads of HWRTDATA and ZSBUFFER
+ */
typedef struct
{
IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */
* fence dependencies are not met. */
IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity
in bytes of the CCB-1 */
-#if defined(SUPPORT_AGP)
+#if defined(SUPPORT_AGP) || defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32ReadOffset2;
-#if defined(SUPPORT_AGP4)
+#if defined(SUPPORT_AGP4) || defined(SUPPORT_OPEN_SOURCE_DRIVER)
IMG_UINT32 ui32ReadOffset3;
IMG_UINT32 ui32ReadOffset4;
#endif
} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+#if defined(SUPPORT_OPEN_SOURCE_DRIVER)
+static_assert(sizeof(RGXFWIF_CCCB_CTL) == 32,
+ "RGXFWIF_CCCB_CTL is incorrect size for SUPPORT_OPEN_SOURCE_DRIVER");
+#endif
typedef IMG_UINT32 RGXFW_FREELIST_TYPE;
RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */
IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */
} RGX_CONTEXT_RESET_REASON_DATA;
+
+#define RGX_HEAP_UM_PDS_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_PDS_RESERVED_REGION_OFFSET 0
+#define RGX_HEAP_PDS_RESERVED_TOTAL_SIZE RGX_HEAP_UM_PDS_RESERVED_SIZE
+
+#define RGX_HEAP_UM_USC_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_USC_RESERVED_REGION_OFFSET 0
+#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE
+
+#define RGX_HEAP_UM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_GENERAL_RESERVED_REGION_OFFSET 0
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#else
+#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE 0
+#endif
+#define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE
+
+#define RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGX_HEAP_KM_GENERAL_RESERVED_SIZE)
#endif /* RGX_FWIF_SHARED_H */
/******************************************************************************
#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x0100U
#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x0800U
#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x1000U
-#define RGX_HWPERF_FEATURE_OCEANIC_FLAG 0x2000U
+#define RGX_HWPERF_FEATURE_RESERVED1_FLAG 0x2000U
/*! This structure holds the data of a firmware packet. */
typedef struct
RGX_HWPERF_PWR_DOWN = 4, /*!< Power turned off to a HW domain */
RGX_HWPERF_PWR_SAFETY_RESET = 5, /*!< Resetting the GPU HW units for safety reasons */
RGX_HWPERF_PWR_PHR_FULL = 6, /*!< Periodic HW full GPU Reset */
+ RGX_HWPERF_PWR_RD_UP = 7, /*!< Power turned on to a Rascal+Dust */
+ RGX_HWPERF_PWR_RD_DOWN = 8, /*!< Power turned off to a Rascal+Dust */
+ RGX_HWPERF_PWR_SPU_UP = 9, /*!< Power turned on to a SPU */
+ RGX_HWPERF_PWR_SPU_DOWN = 10, /*!< Power turned off to a SPU */
+ RGX_HWPERF_PWR_CLUSTER_UP = 11, /*!< Power turned on to a cluster */
+ RGX_HWPERF_PWR_CLUSTER_DOWN = 12, /*!< Power turned off to a cluster */
+ RGX_HWPERF_PWR_RAC_UP = 13, /*!< Power turned on to a RAC */
+ RGX_HWPERF_PWR_RAC_DOWN = 14, /*!< Power turned off to a RAC */
RGX_HWPERF_PWR_LAST,
} RGX_HWPERF_PWR;
RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*!< Freelist ready. */
RGX_HWPERF_FWACT_EV_FEATURES, /*!< Features present */
RGX_HWPERF_FWACT_EV_FILTER_SET, /*!< Event filter set. */
+ RGX_HWPERF_FWACT_EV_KICK_CANCEL, /*!< A previous pipelined kick cancel. */
RGX_HWPERF_FWACT_EV_LAST /*!< Number of element. */
} RGX_HWPERF_FWACT_EV;
*/
typedef enum
{
- RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */
- RGX_HWPERF_KICK_TYPE_TQ2D, /*!< 2D TQ Kick */
- RGX_HWPERF_KICK_TYPE_TQ3D, /*!< 3D TQ Kick */
- RGX_HWPERF_KICK_TYPE_CDM, /*!< Compute Kick */
- RGX_HWPERF_KICK_TYPE_RS, /*!< Ray Store Kick */
- RGX_HWPERF_KICK_TYPE_VRDM, /*!< Vertex Ray Data Master Kick */
- RGX_HWPERF_KICK_TYPE_TQTDM,/*!< 2D Data Master TQ Kick */
- RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */
- RGX_HWPERF_KICK_TYPE_TA, /*!< TA Kick */
- RGX_HWPERF_KICK_TYPE_3D, /*!< 3D Kick */
- RGX_HWPERF_KICK_TYPE_LAST,
+ RGX_HWPERF_KICK_TYPE_RESERVED_0, /*!< Replaced by separate TA and 3D types (Deprecated) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_1, /*!< 2D TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQ2D) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_2, /*!< 3D TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQ3D) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_3, /*!< Compute Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_CDM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_4, /*!< Ray Store Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_RS) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_5, /*!< Vertex Ray Data Master Kick (Deprecated) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_6, /*!< 2D Data Master TQ Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQTDM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_7, /*!< Sync Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_SYNC) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_8, /*!< TA Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_GEOM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_9, /*!< 3D Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_3D) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_10,
+
+ RGX_HWPERF_KICK_TYPE_UNIFIED = 0x10,
+
+ RGX_HWPERF_KICK_TYPE2_TQ2D, /*!< 2D TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_TQ3D, /*!< 3D TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_TQTDM, /*!< 2D Data Master TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_CDM, /*!< Compute Kick */
+ RGX_HWPERF_KICK_TYPE2_GEOM, /*!< GEOM Kick */
+ RGX_HWPERF_KICK_TYPE2_3D, /*!< 3D Kick */
+ RGX_HWPERF_KICK_TYPE2_SYNC, /*!< Sync Kick */
+ RGX_HWPERF_KICK_TYPE2_RS, /*!< Ray Store Kick */
+ RGX_HWPERF_KICK_TYPE2_LAST,
RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff
} RGX_HWPERF_KICK_TYPE;
{
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */
- RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, /*!< Device responding to requests */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */
RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */
RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST
} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON;
+/*! Data for device status event */
+typedef struct
+{
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
+ /*!< Device's health status */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
+ /*!< Reason for device's health status */
+} RGX_HWPERF_HOST_DEVICE_HEALTH;
+
/*! RGX_HWPERF_DEV_INFO_EV values */
typedef enum
{
RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */
+ RGX_HWPERF_DEV_INFO_EV_FEATURES, /*!< Features sub-event */
RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */
} RGX_HWPERF_DEV_INFO_EV;
/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing
- * further data regarding the device's status
+ * further data regarding the device's features and status
*/
typedef union
{
- /*! Data for device status event */
- struct
- {
- RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
- /*!< Device's health status */
- RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
- /*!< Reason for device's health status */
- } sDeviceStatus;
+ RGX_HWPERF_HOST_DEVICE_HEALTH sDeviceStatus; /*!< Device health status */
+ RGX_HWPERF_BVNC sBVNC; /*!< Device features */
} RGX_HWPERF_HOST_DEV_INFO_DETAIL;
/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */
/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */
typedef enum
{
- RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */
- RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */
+ RGX_HWPERF_INFO_EV_RESERVED_0,
+ RGX_HWPERF_INFO_EV_MEM64_USAGE, /*!< 64-bit memory usage event */
+ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */
} RGX_HWPERF_INFO_EV;
/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the
/*! Host Memory usage statistics */
struct
{
- IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */
+ IMG_UINT64 ui64TotalMemoryUsage; /*!< Total memory usage (bytes) */
/*! Detailed memory usage */
struct
{
IMG_UINT32 ui32Pid; /*!< Process ID */
- IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */
- IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */
+ IMG_UINT32 ui32Padding; /*!< Padding */
+ IMG_UINT64 ui64KernelMemUsage; /*!< Kernel memory usage (bytes) */
+ IMG_UINT64 ui64GraphicsMemUsage; /*!< GPU memory usage (bytes) */
} sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS];
} sMemUsageStats;
} RGX_HWPERF_HOST_INFO_DETAIL;
#define RGX_CNTBLK_ID_HUB 0x0002U /*!< Non-cluster grouping cores */
#define RGX_CNTBLK_ID_TORNADO 0x0003U /*!< XT cores */
#define RGX_CNTBLK_ID_JONES 0x0004U /*!< S7 cores */
-#if defined(RGX_FEATURE_HWPERF_OCEANIC)
-#define RGX_CNTBLK_ID_DIRECT_LAST 0x0003U /*!< Indirect blocks start from here */
-#else
#define RGX_CNTBLK_ID_DIRECT_LAST 0x0005U /*!< Indirect blocks start from here */
-#endif /* defined(RGX_FEATURE_HWPERF_OCEANIC) */
-
#define RGX_CNTBLK_ID_BF_DEPRECATED 0x0005U /*!< Doppler unit (DEPRECATED) */
#define RGX_CNTBLK_ID_BT_DEPRECATED 0x0006U /*!< Doppler unit (DEPRECATED) */
#define RGX_CNTBLK_ID_RT_DEPRECATED 0x0007U /*!< Doppler unit (DEPRECATED) */
/*! The number of layout blocks defined with configurable multiplexed
* performance counters, hence excludes custom counter blocks.
*/
-#if defined(RGX_FEATURE_HWPERF_OCEANIC)
-#define RGX_HWPERF_MAX_MUX_BLKS (\
- (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\
- RGX_CNTBLK_INDIRECT_COUNT(PBE, 0) )
-
-#define RGX_HWPERF_MAX_DA_BLKS (\
- (IMG_UINT32)RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 0)+\
- RGX_CNTBLK_INDIRECT_COUNT(USC, 0) )
-
-#define RGX_HWPERF_MAX_DEFINED_BLKS (\
- (IMG_UINT32)RGX_HWPERF_MAX_MUX_BLKS +\
- RGX_HWPERF_MAX_DA_BLKS )
-#else
#define RGX_HWPERF_MAX_DEFINED_BLKS (\
(IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\
RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\
RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) )
#define RGX_HWPERF_MAX_MUX_BLKS (\
RGX_HWPERF_MAX_DEFINED_BLKS )
-#endif
static_assert(
((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN),
* The corresponding bit is set if the build option was enabled at compile
* time.
*
- * In order to extract the enabled build flags the INTERNAL_TEST switch should
- * be enabled in a client program which includes this header. Then the client
- * can test specific build flags by reading the bit value at
- * ##OPTIONNAME##_SET_OFFSET
- * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS.
- *
* IMPORTANT: add new options to unused bits or define a new dword
* (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
* remains backwards compatible.
#ifndef RGX_OPTIONS_H
#define RGX_OPTIONS_H
-#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+#define OPTIONS_NO_HARDWARE_EN (0x1UL << 0)
+#define OPTIONS_PDUMP_EN (0x1UL << 1)
+#define OPTIONS_UNUSED1_EN (0x1UL << 2)
+#define OPTIONS_SECURE_ALLOC_KM_EN (0x1UL << 3)
+#define OPTIONS_RGX_EN (0x1UL << 4)
+#define OPTIONS_SECURE_EXPORT_EN (0x1UL << 5)
+#define OPTIONS_INSECURE_EXPORT_EN (0x1UL << 6)
+#define OPTIONS_VFP_EN (0x1UL << 7)
+#define OPTIONS_WORKLOAD_ESTIMATION_EN (0x1UL << 8)
+#define OPTIONS_PDVFS_EN (0x1UL << 9)
+#define OPTIONS_DEBUG_EN (0x1UL << 10)
+#define OPTIONS_BUFFER_SYNC_EN (0x1UL << 11)
+#define OPTIONS_AUTOVZ_EN (0x1UL << 12)
+#define OPTIONS_AUTOVZ_HW_REGS_EN (0x1UL << 13)
+#define OPTIONS_FW_IRQ_REG_COUNTERS_EN (0x1UL << 14)
+#define OPTIONS_VALIDATION_EN (0x1UL << 15)
+
+#define OPTIONS_PERCONTEXT_FREELIST_EN (0x1UL << 31)
+
+#define RGX_BUILD_OPTIONS_MASK_KM \
+ (OPTIONS_NO_HARDWARE_EN | \
+ OPTIONS_PDUMP_EN | \
+ OPTIONS_SECURE_ALLOC_KM_EN | \
+ OPTIONS_RGX_EN | \
+ OPTIONS_SECURE_EXPORT_EN | \
+ OPTIONS_INSECURE_EXPORT_EN | \
+ OPTIONS_VFP_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN | \
+ OPTIONS_DEBUG_EN | \
+ OPTIONS_BUFFER_SYNC_EN | \
+ OPTIONS_AUTOVZ_EN | \
+ OPTIONS_AUTOVZ_HW_REGS_EN | \
+ OPTIONS_FW_IRQ_REG_COUNTERS_EN | \
+ OPTIONS_VALIDATION_EN)
+
+#define RGX_BUILD_OPTIONS_MASK_FW \
+ (RGX_BUILD_OPTIONS_MASK_KM & \
+ ~OPTIONS_BUFFER_SYNC_EN)
+
+/* Build options that the FW must have if present on the KM */
+#define FW_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN))
+
+/* Build options that the UM must have if present on the KM */
+#define UM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN))
+
+/* Build options that the KM must have if present on the UM */
+#define KM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN | \
+ OPTIONS_BUFFER_SYNC_EN))
#define NO_HARDWARE_OPTION "NO_HARDWARE "
-#if defined(NO_HARDWARE) || defined(INTERNAL_TEST)
- #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0
- #define OPTIONS_BIT0 (0x1UL << 0)
+#if defined(NO_HARDWARE)
+ #define OPTIONS_BIT0 OPTIONS_NO_HARDWARE_EN
#if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#endif /* NO_HARDWARE */
#define PDUMP_OPTION "PDUMP "
-#if defined(PDUMP) || defined(INTERNAL_TEST)
- #define PDUMP_SET_OFFSET OPTIONS_BIT1
- #define OPTIONS_BIT1 (0x1UL << 1)
+#if defined(PDUMP)
+ #define OPTIONS_BIT1 OPTIONS_PDUMP_EN
#if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#endif /* PDUMP */
/* No longer used */
-#define INTERNAL_TEST_OPTION "INTERNAL_TEST "
-#if defined(INTERNAL_TEST)
- #define UNUSED_SET_OFFSET OPTIONS_BIT2
- #define OPTIONS_BIT2 (0x1UL << 2)
+#define INTERNAL_UNUSED1_OPTION "INTERNAL_UNUSED1 "
+#if defined(INTERNAL_UNUSED1)
+ #define OPTIONS_BIT2 OPTIONS_UNUSED1_EN
#if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT2 0x0UL
#endif
-/* No longer used */
-#define UNUSED_OPTION " "
-#if defined(INTERNAL_TEST)
- #define OPTIONS_BIT3 (0x1UL << 3)
- #define INTERNAL_TEST_OPTION "INTERNAL_TEST "
+#define SECURE_ALLOC_KM_OPTION "SECURE_ALLOC_KM "
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ #define OPTIONS_BIT3 OPTIONS_SECURE_ALLOC_KM_EN
#if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
- #define OPTIONS_BIT3 0x0UL
-#endif
+ #define OPTIONS_BIT3 0x0UL
+#endif /* SUPPORT_SECURE_ALLOC_KM */
-#define SUPPORT_RGX_OPTION " "
-#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST)
- #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4
- #define OPTIONS_BIT4 (0x1UL << 4)
+#define RGX_OPTION " "
+#if defined(SUPPORT_RGX)
+ #define OPTIONS_BIT4 OPTIONS_RGX_EN
#if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT4 0x0UL
#endif /* SUPPORT_RGX */
-#define SUPPORT_SECURE_EXPORT_OPTION "SECURE_EXPORTS "
-#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST)
- #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5
- #define OPTIONS_BIT5 (0x1UL << 5)
+#define SECURE_EXPORT_OPTION "SECURE_EXPORTS "
+#if defined(SUPPORT_SECURE_EXPORT)
+ #define OPTIONS_BIT5 OPTIONS_SECURE_EXPORT_EN
#if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT5 0x0UL
#endif /* SUPPORT_SECURE_EXPORT */
-#define SUPPORT_INSECURE_EXPORT_OPTION "INSECURE_EXPORTS "
-#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST)
- #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6
- #define OPTIONS_BIT6 (0x1UL << 6)
+#define INSECURE_EXPORT_OPTION "INSECURE_EXPORTS "
+#if defined(SUPPORT_INSECURE_EXPORT)
+ #define OPTIONS_BIT6 OPTIONS_INSECURE_EXPORT_EN
#if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT6 0x0UL
#endif /* SUPPORT_INSECURE_EXPORT */
-#define SUPPORT_VFP_OPTION "VFP "
-#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST)
- #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7
- #define OPTIONS_BIT7 (0x1UL << 7)
+#define VFP_OPTION "VFP "
+#if defined(SUPPORT_VFP)
+ #define OPTIONS_BIT7 OPTIONS_VFP_EN
#if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT7 0x0UL
#endif /* SUPPORT_VFP */
-#define SUPPORT_WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION "
-#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST)
- #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8
- #define OPTIONS_BIT8 (0x1UL << 8)
+#define WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION "
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ #define OPTIONS_BIT8 OPTIONS_WORKLOAD_ESTIMATION_EN
#if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
#define OPTIONS_BIT8 0x0UL
#endif /* SUPPORT_WORKLOAD_ESTIMATION */
-#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8)
-#define SUPPORT_PDVFS_OPTION "PDVFS "
-#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST)
- #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9
- #define OPTIONS_BIT9 (0x1UL << 9)
+#define PDVFS_OPTION "PDVFS "
+#if defined(SUPPORT_PDVFS)
+ #define OPTIONS_BIT9 OPTIONS_PDVFS_EN
#if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
#define OPTIONS_BIT9 0x0UL
#endif /* SUPPORT_PDVFS */
-#define OPTIONS_PDVFS_MASK (0x1UL << 9)
#define DEBUG_OPTION "DEBUG "
-#if defined(DEBUG) || defined(INTERNAL_TEST)
- #define DEBUG_SET_OFFSET OPTIONS_BIT10
- #define OPTIONS_BIT10 (0x1UL << 10)
+#if defined(DEBUG)
+ #define OPTIONS_BIT10 OPTIONS_DEBUG_EN
#if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
#define OPTIONS_BIT10 0x0UL
#endif /* DEBUG */
-/* The bit position of this should be the same as DEBUG_SET_OFFSET option
- * when defined.
- */
-#define OPTIONS_DEBUG_MASK (0x1UL << 10)
-#define SUPPORT_BUFFER_SYNC_OPTION "BUFFER_SYNC "
-#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST)
- #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11
- #define OPTIONS_BIT11 (0x1UL << 11)
+#define BUFFER_SYNC_OPTION "BUFFER_SYNC "
+#if defined(SUPPORT_BUFFER_SYNC)
+ #define OPTIONS_BIT11 OPTIONS_BUFFER_SYNC_EN
#if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT11 0x0UL
#endif /* SUPPORT_BUFFER_SYNC */
-#define SUPPORT_AUTOVZ_OPTION "AUTOVZ "
+#define AUTOVZ_OPTION "AUTOVZ "
#if defined(SUPPORT_AUTOVZ)
- #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12
- #define OPTIONS_BIT12 (0x1UL << 12)
+ #define OPTIONS_BIT12 OPTIONS_AUTOVZ_EN
#if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
- #define OPTIONS_BIT12 0x0UL
+ #define OPTIONS_BIT12 0x0UL
#endif /* SUPPORT_AUTOVZ */
-#define SUPPORT_AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS "
+#define AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS "
#if defined(SUPPORT_AUTOVZ_HW_REGS)
- #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13
- #define OPTIONS_BIT13 (0x1UL << 13)
+ #define OPTIONS_BIT13 OPTIONS_AUTOVZ_HW_REGS_EN
#if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#else
- #define OPTIONS_BIT13 0x0UL
+ #define OPTIONS_BIT13 0x0UL
#endif /* SUPPORT_AUTOVZ_HW_REGS */
#define RGX_FW_IRQ_OS_COUNTERS_OPTION "FW_IRQ_OS_COUNTERS "
-#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST)
- #define SUPPORT_FW_IRQ_REG_COUNTERS OPTIONS_BIT14
- #define OPTIONS_BIT14 (0x1UL << 14)
+#if defined(RGX_FW_IRQ_OS_COUNTERS)
+ #define OPTIONS_BIT14 OPTIONS_FW_IRQ_REG_COUNTERS_EN
#if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT14 0x0UL
#endif /* RGX_FW_IRQ_OS_COUNTERS */
-#define VALIDATION_EN_MASK (0x1UL << 15)
-#define SUPPORT_VALIDATION_OPTION "VALIDATION "
+#define VALIDATION_OPTION "VALIDATION "
#if defined(SUPPORT_VALIDATION)
- #define SUPPORT_VALIDATION_OFFSET OPTIONS_BIT15
- #define OPTIONS_BIT15 (0x1UL << 15)
+ #define OPTIONS_BIT15 OPTIONS_VALIDATION_EN
#if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
#define OPTIONS_BIT15 0x0UL
#endif /* SUPPORT_VALIDATION */
-#define RGX_BUILD_OPTIONS_KM \
- (OPTIONS_BIT0 |\
- OPTIONS_BIT1 |\
- OPTIONS_BIT2 |\
- OPTIONS_BIT3 |\
- OPTIONS_BIT4 |\
- OPTIONS_BIT6 |\
- OPTIONS_BIT7 |\
- OPTIONS_BIT8 |\
- OPTIONS_BIT9 |\
- OPTIONS_BIT10 |\
- OPTIONS_BIT11 |\
- OPTIONS_BIT12 |\
- OPTIONS_BIT13 |\
- OPTIONS_BIT14 |\
- OPTIONS_BIT15)
-
-#define RGX_BUILD_OPTIONS_LIST \
- { \
- NO_HARDWARE_OPTION, \
- PDUMP_OPTION, \
- INTERNAL_TEST_OPTION, \
- UNUSED_OPTION, \
- SUPPORT_RGX_OPTION, \
- SUPPORT_SECURE_EXPORT_OPTION, \
- SUPPORT_INSECURE_EXPORT_OPTION, \
- SUPPORT_VFP_OPTION, \
- SUPPORT_WORKLOAD_ESTIMATION_OPTION, \
- SUPPORT_PDVFS_OPTION, \
- DEBUG_OPTION, \
- SUPPORT_BUFFER_SYNC_OPTION, \
- SUPPORT_AUTOVZ_OPTION, \
- SUPPORT_AUTOVZ_HW_REGS_OPTION, \
- RGX_FW_IRQ_OS_COUNTERS_OPTION, \
- SUPPORT_VALIDATION_OPTION \
- }
-
-#define RGX_BUILD_OPTIONS_MASK_FW \
- (RGX_BUILD_OPTIONS_MASK_KM & \
- ~OPTIONS_BIT11)
-
-#define OPTIONS_BIT31 (0x1UL << 31)
+#define OPTIONS_BIT31 OPTIONS_PERCONTEXT_FREELIST_EN
#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
#error "Bit exceeds reserved range"
#endif
-#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31
+
+#define RGX_BUILD_OPTIONS_KM \
+ (OPTIONS_BIT0 |\
+ OPTIONS_BIT1 |\
+ OPTIONS_BIT2 |\
+ OPTIONS_BIT3 |\
+ OPTIONS_BIT4 |\
+ OPTIONS_BIT5 |\
+ OPTIONS_BIT6 |\
+ OPTIONS_BIT7 |\
+ OPTIONS_BIT8 |\
+ OPTIONS_BIT9 |\
+ OPTIONS_BIT10 |\
+ OPTIONS_BIT11 |\
+ OPTIONS_BIT12 |\
+ OPTIONS_BIT13 |\
+ OPTIONS_BIT14 |\
+ OPTIONS_BIT15)
#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
-#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \
- ~(OPTIONS_DEBUG_MASK | \
- OPTIONS_WORKLOAD_ESTIMATION_MASK | \
- OPTIONS_PDVFS_MASK))
+#define RGX_BUILD_OPTIONS_LIST \
+ { \
+ NO_HARDWARE_OPTION, \
+ PDUMP_OPTION, \
+ INTERNAL_UNUSED1_OPTION, \
+ SECURE_ALLOC_KM_OPTION, \
+ RGX_OPTION, \
+ SECURE_EXPORT_OPTION, \
+ INSECURE_EXPORT_OPTION, \
+ VFP_OPTION, \
+ WORKLOAD_ESTIMATION_OPTION, \
+ PDVFS_OPTION, \
+ DEBUG_OPTION, \
+ BUFFER_SYNC_OPTION, \
+ AUTOVZ_OPTION, \
+ AUTOVZ_HW_REGS_OPTION, \
+ RGX_FW_IRQ_OS_COUNTERS_OPTION, \
+ VALIDATION_OPTION \
+ }
#endif /* RGX_OPTIONS_H */
#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000)
#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000)
#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000)
+#define RGX_HEAP_SIZE_32MiB IMG_UINT64_C(0x0002000000)
#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000)
#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000)
/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF **/
/* 880 GiB to 896 GiB, size of 16 GiB : FREE **/
-/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
- /* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
+/* 0xE0_0000_0000 - 0xE0_FDFF_FFFF **/
+ /* 896 GiB to 900 GiB, size of 4 GiB less 32 MiB : USCCODE_HEAP **/
#define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
- #define RGX_USCCODE_HEAP_SIZE RGX_HEAP_SIZE_4GiB
+ #define RGX_USCCODE_HEAP_SIZE (RGX_HEAP_SIZE_4GiB - RGX_HEAP_SIZE_32MiB)
+
+/* 0xE0_FE00_0000 - 0xE0_FFFF_FFFF **/
+ /* 900 GiB less 32 MiB to 900 GiB, size of 32 MiB : RESERVED VOLCANIC **/
/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
#ifndef SERVICES_KM_H
#define SERVICES_KM_H
+#include "img_types.h"
+
#if defined(SUPPORT_GPUVIRT_VALIDATION)
#include "virt_validation_defs.h"
#endif
/*! 4k page size definition */
-#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE 4096U /*!< Size of a 4K Page */
#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that
it is always page-aligned */
/*! 16k page size definition */
-#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE 16384U /*!< Size of a 16K Page */
#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that
it is always page-aligned */
/*! 64k page size definition */
-#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE 65536U /*!< Size of a 64K Page */
#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that
it is always page-aligned */
/*! 256k page size definition */
-#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE 262144U /*!< Size of a 256K Page */
#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that
it is always page-aligned */
/*! 1MB page size definition */
-#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE 1048576U /*!< Size of a 1M Page */
#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that
it is always page-aligned */
/*! 2MB page size definition */
-#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE 2097152U /*!< Size of a 2M Page */
#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that
it is always page-aligned */
#define PDUMP_FRAME_UNSET IMG_UINT32_MAX
/* Status of the device. */
-typedef enum
-{
- PVRSRV_DEVICE_STATUS_UNKNOWN, /* status of the device is unknown */
- PVRSRV_DEVICE_STATUS_OK, /* the device is operational */
- PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
- PVRSRV_DEVICE_STATUS_DEVICE_ERROR /* the device is not operational */
-} PVRSRV_DEVICE_STATUS;
+typedef IMG_UINT32 PVRSRV_DEVICE_STATUS;
+#define PVRSRV_DEVICE_STATUS_UNKNOWN 0U /* status of the device is unknown */
+#define PVRSRV_DEVICE_STATUS_OK 1U /* the device is operational */
+#define PVRSRV_DEVICE_STATUS_NOT_RESPONDING 2U /* the device is not responding */
+#define PVRSRV_DEVICE_STATUS_DEVICE_ERROR 3U /* the device is not operational */
#endif /* SERVICES_KM_H */
/**************************************************************************//**
*****************************************************************************
* States for power management
*****************************************************************************/
+
/*!
System Power State Enum
*/
+
+#define _PVRSRV_SYS_POWER_STATES \
+ X(Unspecified, -1) /*!< Unspecified : Uninitialised */ \
+ X(OFF, 0) /*!< Off */ \
+ X(ON, 1) /*!< On */
+
typedef enum _PVRSRV_SYS_POWER_STATE_
{
- PVRSRV_SYS_POWER_STATE_Unspecified = -1, /*!< Unspecified : Uninitialised */
- PVRSRV_SYS_POWER_STATE_OFF = 0, /*!< Off */
- PVRSRV_SYS_POWER_STATE_ON = 1, /*!< On */
+#define X(name, value) PVRSRV_SYS_POWER_STATE_##name = value,
+ _PVRSRV_SYS_POWER_STATES
+#undef X
PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
Power Flags Enum
*/
typedef IMG_UINT32 PVRSRV_POWER_FLAGS;
-#define PVRSRV_POWER_FLAGS_NONE 0U /*!< No flags */
-#define PVRSRV_POWER_FLAGS_FORCED 1U << 0 /*!< Power the transition should not fail */
-#define PVRSRV_POWER_FLAGS_SUSPEND 1U << 1 /*!< Power transition is due to OS suspend request */
+#define PVRSRV_POWER_FLAGS_NONE 0U /*!< No flags */
+#define PVRSRV_POWER_FLAGS_FORCED (1U << 0) /*!< Forced power transition; must not fail */
+#define PVRSRV_POWER_FLAGS_SUSPEND_REQ (1U << 1) /*!< Power transition is due to OS suspend request */
+#define PVRSRV_POWER_FLAGS_RESUME_REQ (1U << 2) /*!< Power transition is due to OS resume request */
/* Clock speed handler prototypes */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX fw interface alignment checks
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Checks to avoid misalignment in RGX fw data structures
+ shared with the host
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_ALIGNCHECKS_H)
+#define RGX_FWIF_ALIGNCHECKS_H
+
+/* for the offsetof macro */
+#if defined(__KERNEL__) && defined(__linux__)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128U
+
+#if defined(PM_INTERACTIVE_MODE)
+#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMMListDevVAddr),
+#define HWRTDATA_HEAPTABLE_OFFSET offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr),
+#else
+#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMRenderStateDevVAddr),
+#define HWRTDATA_HEAPTABLE_OFFSET
+#endif
+
+#define RGXFW_ALIGN_CHECKS_INIT0 \
+ sizeof(RGXFWIF_TRACEBUF), \
+ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \
+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \
+ offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \
+ offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \
+ \
+ sizeof(RGXFWIF_SYSDATA), \
+ offsetof(RGXFWIF_SYSDATA, ePowState), \
+ offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \
+ offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \
+ offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \
+ offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \
+ \
+ sizeof(RGXFWIF_OSDATA), \
+ offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \
+ offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \
+ \
+ sizeof(RGXFWIF_HWRINFOBUF), \
+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \
+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \
+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \
+ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \
+ \
+ /* RGXFWIF_CMDTA checks */ \
+ sizeof(RGXFWIF_CMDTA), \
+ offsetof(RGXFWIF_CMDTA, sGeomRegs), \
+ \
+ /* RGXFWIF_CMD3D checks */ \
+ sizeof(RGXFWIF_CMD3D), \
+ offsetof(RGXFWIF_CMD3D, s3DRegs), \
+ \
+ /* RGXFWIF_CMD_COMPUTE checks */ \
+ sizeof(RGXFWIF_CMD_COMPUTE), \
+ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \
+ \
+ /* RGXFWIF_FREELIST checks */ \
+ sizeof(RGXFWIF_FREELIST), \
+ offsetof(RGXFWIF_FREELIST, sFreeListBaseDevVAddr),\
+ offsetof(RGXFWIF_FREELIST, sFreeListStateDevVAddr),\
+ offsetof(RGXFWIF_FREELIST, sFreeListLastGrowDevVAddr),\
+ offsetof(RGXFWIF_FREELIST, ui32MaxPages),\
+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\
+ \
+ /* RGXFWIF_HWRTDATA checks */ \
+ sizeof(RGXFWIF_HWRTDATA), \
+ HWRTDATA_PM_OFFSET \
+ HWRTDATA_HEAPTABLE_OFFSET \
+ offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\
+ /*offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase),*/ \
+ offsetof(RGXFWIF_HWRTDATA, eState), \
+ \
+\
+ sizeof(RGXFWIF_HWPERF_CTL), \
+ offsetof(RGXFWIF_HWPERF_CTL, sBlkCfg), \
+ sizeof(RGXFWIF_CMDTDM), \
+ offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+
+#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT0
+
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM0 \
+ sizeof(RGXFWIF_SYSINIT), \
+ offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \
+ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \
+ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \
+ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \
+ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \
+ offsetof(RGXFWIF_SYSINIT, sFwSysData), \
+ \
+ sizeof(RGXFWIF_OSINIT), \
+ offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \
+ offsetof(RGXFWIF_OSINIT, psKernelCCB), \
+ offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \
+ offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \
+ offsetof(RGXFWIF_OSINIT, sFwOsData), \
+ offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \
+ \
+ /* RGXFWIF_FWRENDERCONTEXT checks */ \
+ sizeof(RGXFWIF_FWRENDERCONTEXT), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \
+ \
+ sizeof(RGXFWIF_FWCOMPUTECONTEXT), \
+ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), \
+ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sStaticComputeContextState),\
+ offsetof(RGXFWIF_FWCOMPUTECONTEXT, ui32WorkEstCCBSubmitted),\
+ \
+ sizeof(RGXFWIF_FWTDMCONTEXT), \
+ offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), \
+ offsetof(RGXFWIF_FWTDMCONTEXT, ui32WorkEstCCBSubmitted),\
+ \
+ sizeof(RGXFWIF_FWCOMMONCONTEXT), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \
+ \
+ sizeof(RGXFWIF_MMUCACHEDATA), \
+ offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \
+ offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \
+ offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue)
+
+#if defined(SUPPORT_TRP)
+#define RGXFW_ALIGN_CHECKS_INIT_KM \
+ RGXFW_ALIGN_CHECKS_INIT_KM0, \
+ offsetof(RGXFWIF_FWTDMCONTEXT, ui32TRPState), \
+ offsetof(RGXFWIF_FWTDMCONTEXT, aui64TRPChecksums2D)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT_KM RGXFW_ALIGN_CHECKS_INIT_KM0
+#endif
+
+#endif /* RGX_FWIF_ALIGNCHECKS_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgx_fwif_hwperf.h
+@Title RGX HWPerf support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared header between RGX firmware and Init process
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf.h"
+#include "rgxdefs_km.h"
+
+/* Server and Firmware definitions only */
+
+/*! The number of HWPerf blocks in the GPU */
+
+#if defined(RGX_FIRMWARE)
+#define RGX_HWPERF_NUM_SPU ((RGX_FEATURE_NUM_SPU))
+#define RGX_HWPERF_NUM_USC ((RGX_FEATURE_NUM_CLUSTERS))
+#define RGX_HWPERF_NUM_ISP_PER_SPU ((RGX_FEATURE_NUM_ISP_PER_SPU))
+#define RGX_HWPERF_NUM_PBE ((RGX_FEATURE_PBE_PER_SPU) * (RGX_FEATURE_NUM_SPU))
+#define RGX_HWPERF_NUM_MERCER ((RGX_FEATURE_NUM_CLUSTERS))
+#define RGX_HWPERF_NUM_PBE_SHARED ((RGX_FEATURE_NUM_SPU))
+#define RGX_HWPERF_NUM_SWIFT ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU))
+#define RGX_HWPERF_NUM_TEXAS ((RGX_FEATURE_NUM_SPU))
+#if (RGX_FEATURE_RAY_TRACING_ARCH > 2) && (RGX_FEATURE_SPU0_RAC_PRESENT > 0)
+#define RGX_HWPERF_NUM_RAC ((RGX_NUM_RAC))
+#else
+#define RGX_HWPERF_NUM_RAC ((0))
+#endif
+#define RGX_HWPERF_NUM_TPU ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU))
+#define RGX_HWPERF_NUM_ISP ((RGX_FEATURE_NUM_CLUSTERS))
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class) ((RGX_HWPERF_NUM_ ## _class))
+
+/*! The number of layout blocks defined with configurable
+ * performance counters. Compile time constants.
+ * This is for the Series 8XT+ layout.
+ */
+#define RGX_HWPERF_MAX_DEFINED_BLKS (\
+ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\
+ RGX_CNTBLK_INDIRECT_COUNT(ISP) +\
+ RGX_CNTBLK_INDIRECT_COUNT(MERCER) +\
+ RGX_CNTBLK_INDIRECT_COUNT(PBE) +\
+ RGX_CNTBLK_INDIRECT_COUNT(PBE_SHARED) +\
+ RGX_CNTBLK_INDIRECT_COUNT(USC) +\
+ RGX_CNTBLK_INDIRECT_COUNT(TPU) +\
+ RGX_CNTBLK_INDIRECT_COUNT(SWIFT) +\
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS) +\
+ RGX_CNTBLK_INDIRECT_COUNT(RAC))
+
+#endif /* RGX_FIRMWARE */
+
+/*****************************************************************************/
+
+/* Structure used in the FW's global control data to hold the performance
+ * counters provisioned for a given block. */
+typedef struct
+{
+ IMG_UINT32 uiBlockID;
+ IMG_UINT32 uiNumCounters; // Number of counters held
+ // in aui32CounterCfg
+ // [0..RGX_CNTBLK_COUNTERS_MAX)
+ IMG_UINT32 uiEnabled; // 1 => enabled, 0 => disabled
+ RGXFWIF_DEV_VIRTADDR psModel; // link to model table for uiBlockID
+ IMG_UINT32 aui32CounterCfg[RGX_CNTBLK_COUNTERS_MAX];
+} RGXFWIF_HWPERF_CTL_BLK;
+
+
+/*!
+ *****************************************************************************
+ * Structure used in the FW's global RGXFW_CTL store, holding HWPerf counter
+ * block configuration. It is written to by the Server on FW initialisation
+ * (PDUMP=1) and by the FW BG kCCB command processing code. It is read by
+ * the FW IRQ register programming and HWPerf event generation routines.
+ * Size of the sBlkCfg[] array must be consistent between KM/UM and FW.
+ * FW will ASSERT if the sizes are different
+ * (ui32NumBlocks != RGX_HWPERF_MAX_DEFINED_BLKS)
+ ****************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32Reserved;
+ IMG_UINT32 ui32CtrlWord;
+ IMG_UINT32 ui32EnabledBlksCount;
+ IMG_UINT32 ui32NumBlocks;
+ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[1]; // First array entry
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures used by pvrsrvkm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by pvrsrvkm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_KM_H)
+#define RGX_FWIF_KM_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "dllist.h"
+#include "rgx_hwperf.h"
+#include "rgxheapconfig.h"
+
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE 0x00000000U
+#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U
+#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U
+#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U
+#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U
+#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U
+#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U
+#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U
+#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U
+#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U
+#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U
+#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U
+#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U
+#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U
+#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU
+#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+ const IMG_CHAR* pszLogGroupName;
+ IMG_UINT32 ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+ table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \
+ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \
+ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \
+ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+ { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \
+ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
+
+/* Used in print statements to display log group state, one %s per group defined.
+ * Note: the 15 "%s" specifiers must stay in sync with the 15 arguments produced
+ * by RGXFWIF_LOG_ENABLED_GROUPS_LIST below. */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group.
+ * Expands to a comma-separated argument list; each entry is the group name
+ * when the corresponding bit in 'types' is set, or "" when clear. */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :(""))
+
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+/* Minimum allowed FW signature buffer size */
+#define RGXFW_SIG_BUFFER_SIZE_MIN (8192)
+
+/* Identifier combining a flag in bit 28 with the RGX_CR_TIMER register constant */
+#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER)
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Min, Max, and Default size of RGXFWIF_TRACEBUF_SPACE in DWords */
+#define RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS 8192U /* 32KB */
+#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U /* ~48KB */
+#define RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS 32768U /* 128KB */
+
+/* Size of the path/info string buffers in RGXFWIF_FILE_INFO_BUF */
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U
+/* Number of firmware threads: 2 when the META second thread is supported, else 1 */
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2U
+#else
+#define RGXFW_THREAD_NUM 1U
+#endif
+
+/* Flag bit ORed into poll type values — NOTE(review): exact semantics not visible in this header */
+#define RGXFW_POLL_TYPE_SET 0x80000000U
+
+/* Maximum length of the user process name captured in HWR records (see RGX_HWRINFO.szProcName) */
+#define RGXFW_PROCESS_NAME_LEN (16)
+
+/* Captures file/line information for a FW assert or fault
+ * (used by RGXFWIF_TRACEBUF_SPACE.sAssertBuf and RGX_FWFAULTINFO.sFaultBuf) */
+typedef struct
+{
+ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; /*!< Path string buffer */
+ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; /*!< Info/message string buffer */
+ IMG_UINT32 ui32LineNum; /*!< Line number */
+} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF;
+
+/*!
+ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface
+ * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing
+ * @{
+ */
+
+/*!
+ * @Brief Firmware trace buffer details (one instance per FW thread)
+ */
+typedef struct
+{
+ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer) */
+ IMG_UINT32 ui32WrapCount; /*!< Number of times the Trace Buffer has wrapped */
+
+#if defined(RGX_FIRMWARE)
+ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */
+#else
+ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/
+#endif
+ IMG_PUINT32 RGXFW_ALIGN pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */
+
+ RGXFWIF_FILE_INFO_BUF RGXFW_ALIGN sAssertBuf; /*!< Latest FW assert file/line/message info */
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+/*! @} End of Defgroup SRVAndFWTracing */
+
+#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */
+
+/*! Single firmware fault record (timestamps plus file/line info) */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< GPU CR timer value at the time of the fault */
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of the fault */
+ IMG_UINT32 RGXFW_ALIGN ui32Data; /*!< Fault data word — NOTE(review): meaning not visible in this header */
+ IMG_UINT32 ui32Reserved; /*!< Reserved/padding */
+ RGXFWIF_FILE_INFO_BUF sFaultBuf; /*!< File/line/message info for the fault */
+} UNCACHED_ALIGN RGX_FWFAULTINFO;
+
+
+/* X-macro list of FW power states; expanded below to build RGXFWIF_POW_STATE */
+#define RGXFWIF_POW_STATES \
+ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \
+ X(RGXFWIF_POW_ON) /* running HW commands */ \
+ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \
+ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */
+
+/*! Firmware power state, generated from the RGXFWIF_POW_STATES X-macro list */
+typedef enum
+{
+#define X(NAME) NAME,
+ RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */
+#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */
+#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */
+#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */
+#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */
+
+/* PHR (Periodic Hardware Reset) state occupies bits 8..9 of the HWR state flags */
+#define RGXFWIF_PHR_STATE_SHIFT (8U)
+#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */
+#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */
+#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED)
+
+/* PHR operating modes */
+#define RGXFWIF_PHR_MODE_OFF (0UL)
+#define RGXFWIF_PHR_MODE_RD_RESET (1UL)
+#define RGXFWIF_PHR_MODE_FULL_RESET (2UL)
+
+/*! Bitmask of RGXFWIF_HWR_* / RGXFWIF_PHR_* flags */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states (note: bit 1 is unused) */
+#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */
+#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */
+#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU ECC error */
+
+/* Firmware's connection state */
+typedef enum
+{
+ RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */
+ RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */
+ RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */
+ RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */
+ RGXFW_CONNECTION_FW_STATE_COUNT /*!< Number of FW connection states (not a state) */
+} RGXFWIF_CONNECTION_FW_STATE;
+
+/* OS' connection state */
+typedef enum
+{
+ RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */
+ RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */
+ RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */
+ RGXFW_CONNECTION_OS_STATE_COUNT /*!< Number of OS connection states (not a state) */
+} RGXFWIF_CONNECTION_OS_STATE;
+
+/* Per-OS runtime state flags, packed into a single 32-bit word */
+typedef struct
+{
+ IMG_UINT bfOsState : 3; /*!< OS state (3 bits) — presumably an RGXFWIF_CONNECTION_OS_STATE value; confirm against FW code */
+ IMG_UINT bfFLOk : 1; /*!< Freelist OK flag — NOTE(review): confirm semantics */
+ IMG_UINT bfFLGrowPending : 1; /*!< Freelist grow pending flag — NOTE(review): confirm semantics */
+ IMG_UINT bfReserved : 27; /*!< Unused; pads the word to 32 bits */
+} RGXFWIF_OS_RUNTIME_FLAGS;
+
+/*! Bitmask of RGXFWIF_DM_STATE_* per-DM recovery flags */
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+#define PVR_SLR_LOG_ENTRIES 10U
+#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */
+
+/* Single Sync Lockup Recovery (SLR) log entry */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN ui64Timestamp; /*!< Time the SLR event was recorded */
+ IMG_UINT32 ui32FWCtxAddr; /*!< FW address of the affected context */
+ IMG_UINT32 ui32NumUFOs; /*!< Number of UFOs involved */
+ IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; /*!< Name of the client CCB (possibly truncated) */
+} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY;
+#endif
+
+/*!
+ * @InGroup SRVAndFWTracing
+ * @Brief Firmware trace control data
+ */
+typedef struct
+{
+ IMG_UINT32 ui32LogType; /*!< FW trace log group configuration (RGXFWIF_LOG_TYPE_* bits) */
+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer (one per FW thread) */
+ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated
+ (in RGXTraceBufferInitOnDemandResources) */
+ IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+/*! @Brief Firmware system data shared with the Host driver */
+typedef struct
+{
+ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host (RGXFWIF_INICFG_*) */
+ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host (RGXFWIF_INICFG_EXT_*) */
+ volatile RGXFWIF_POW_STATE ePowState; /*!< Current FW power state, updated asynchronously by the FW */
+ volatile IMG_UINT32 ui32HWPerfRIdx; /*!< HWPerf buffer read index */
+ volatile IMG_UINT32 ui32HWPerfWIdx; /*!< HWPerf buffer write index */
+ volatile IMG_UINT32 ui32HWPerfWrapCount; /*!< Number of times the HWPerf buffer has wrapped */
+ IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */
+ IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */
+
+ /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with
+ * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */
+ IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */
+ IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */
+ IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */
+ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OSIDS];/*!< State flags for each Operating System mirrored from Fw coremem */
+ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */
+ IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */
+ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */
+ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */
+ IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */
+ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; /*!< Time the FW last entered idle — NOTE(review): confirm units/clock source */
+
+#if defined(SUPPORT_VALIDATION)
+ IMG_UINT32 ui32RenderKillingCtl; /*!< Rasterisation DM Killing Configuration from host */
+ IMG_UINT32 ui32CDMTDMKillingCtl; /*!< CDM/TDM Killing Configuration from host */
+#endif
+#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK)
+#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8)
+#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE)
+ IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX];
+#endif
+ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */
+ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */
+ IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_SYSDATA;
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware per-os data and configuration
+ */
+typedef struct
+{
+ IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS (RGXFWIF_INICFG_OS_*) */
+ IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */
+ IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+ IMG_UINT32 ui32ForcedUpdatesRequested; /*!< Count of forced sync updates requested (SLR) */
+ IMG_UINT8 ui8SLRLogWp; /*!< Write pointer into sSLRLog (circular) */
+ RGXFWIF_SLR_ENTRY sSLRLogFirst; /*!< First SLR event recorded (never overwritten) */
+ RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; /*!< Circular log of recent SLR events */
+ IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; /*!< Time of the most recent forced update */
+#endif
+ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */
+ IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */
+ RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */
+ IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_OSDATA;
+
+/* Firmware trace time-stamp field breakup:
+ * bits 0..47 hold the RGX_CR_TIMER value, bits 48..63 hold extra debug info. */
+
+/* RGX_CR_TIMER register read (48 bits) value*/
+#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U)
+#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000))
+
+/* Extra debug-info (16 bits); clear-mask is the complement of the time clear-mask */
+#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U)
+#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK
+
+
+/* Debug-info sub-fields */
+/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */
+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U)
+#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT)
+
+/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */
+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U)
+#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT)
+
+/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */
+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U)
+#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT)
+
+/* Bit 3-15: Unused bits */
+
+#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64
+#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: "
+#define RGXFWT_DEBUG_INFO_STR_APPEND ")"
+
+/* Table of debug info sub-field's masks and corresponding message strings
+ * to be appended to firmware trace
+ *
+ * Mask : 16 bit mask to be applied to debug-info field
+ * String : debug info message string
+ *
+ * Expand by defining X(mask, str) before referencing the list (X-macro pattern).
+ */
+
+#define RGXFWT_DEBUG_INFO_MSKSTRLIST \
+/*Mask, String*/ \
+X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \
+X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \
+X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events")
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+/*!
+ * @Defgroup HWRInfo FW HWR shared data interface
+ * @Brief Types grouping data structures and defines used in realising the HWR record.
+ * @{
+ */
+/*! @Brief HWR Lockup types */
+typedef enum
+{
+ RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */
+ RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */
+ RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */
+ RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */
+ RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */
+ RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */
+ RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */
+ RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */
+ RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */
+ RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */
+ RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */
+} RGX_HWRTYPE;
+
+/*! Returns the BIF bank index (0 or 1) for a BIF fault type */
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+/*! True when the HWR type represents any kind of page/translation fault */
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \
+ ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false)
+
+/************************
+ * GPU HW error codes *
+ ************************/
+typedef enum
+{
+ RGX_HW_ERR_NA = 0x0, /*!< No HW error */
+ RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, /*!< Primitive ID failure during DM kill */
+} RGX_HW_ERR;
+
+/* BIF fault details captured at HWR time */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */
+ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Reserved/padding */
+} RGX_BIFINFO;
+
+/* ECC fault details captured at HWR time */
+typedef struct
+{
+ IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */
+} RGX_ECCINFO;
+
+/* MMU fault details captured at HWR time */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Reserved/padding */
+} RGX_MMUINFO;
+
+/* Poll failure details captured at HWR time */
+typedef struct
+{
+ IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */
+ IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */
+ IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */
+ IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */
+ IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Reserved/padding */
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+/* MIPS TLB fault details captured at HWR time */
+typedef struct
+{
+ IMG_UINT32 ui32BadVAddr; /*!< VA address */
+ IMG_UINT32 ui32EntryLo; /*!< TLB EntryLo value — NOTE(review): confirm against MIPS TLB register layout */
+} RGX_TLBINFO;
+
+/*! @Brief One HWR (hardware recovery) record: per-type fault details plus workload context */
+typedef struct
+{
+ union
+ {
+ RGX_BIFINFO sBIFInfo; /*!< BIF failure details */
+ RGX_MMUINFO sMMUInfo; /*!< MMU failure details */
+ RGX_POLLINFO sPollInfo; /*!< Poll failure details */
+ RGX_TLBINFO sTLBInfo; /*!< TLB failure details */
+ RGX_ECCINFO sECCInfo; /*!< ECC failure details */
+ } uHWRData; /*!< Active member selected by eHWRType */
+
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */
+ IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */
+ IMG_UINT32 ui32PID; /*!< PID belonging to the workload */
+ IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */
+ IMG_UINT32 ui32HWRNumber; /*!< HWR number */
+ IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */
+ IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */
+ RGX_HWRTYPE eHWRType; /*!< Type of lockup */
+ RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */
+ IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */
+ RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */
+ IMG_CHAR RGXFW_ALIGN szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */
+ IMG_UINT32 RGXFW_ALIGN ui32PDSStalledDMs; /*!< DMs stalled on PDS Store space */
+ IMG_UINT32 ui32PDSActiveDMUSCs; /*!< Per-DM USC PDS activity */
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */
+
+/*! @Brief Shared HWR log buffer: first-N records are kept, later records rotate */
+typedef struct
+{
+ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */
+ IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */
+ IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */
+ IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */
+ IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */
+ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */
+ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */
+ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */
+ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+
+/*! @} End of HWRInfo */
+
+/* Context switch profile values, placed in the INICFG CTXSWITCH_PROFILE field */
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1))
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2))
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3))
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4))
+
+/* CDM arbitration mode values, placed in the INICFG CDM_ARBITRATION field */
+#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1))
+#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2))
+
+/* ISP scheduling mode values, placed in the INICFG ISPSCHEDMODE field */
+#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1))
+#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2))
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ * NOTE: Please be careful to keep backwards compatibility with DDKv1 for the
+ * CTXSWITCH controls.
+ *****************************************************************************/
+
+/* Flag definitions affecting the firmware globally */
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1)
+#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2)
+#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */
+#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4)
+#define RGXFWIF_INICFG_SPU_CLOCK_GATE (IMG_UINT32_C(0x1) << 5)
+#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8)
+#define RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES (IMG_UINT32_C(0x1) << 9)
+/* 10 unused */
+/* 11 unused */
+#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14)
+/* 15 unused */
+/* Bits 16..18: context switch profile (RGXFWIF_CTXSWITCH_PROFILE_*_EN values) */
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20)
+#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21)
+#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22)
+#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23)
+#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24)
+#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25)
+#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26)
+/* Bits 27..28: CDM arbitration mode (RGXFWIF_CDM_ARBITRATION_*_EN values) */
+#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT)
+/* Bits 29..30: ISP scheduling mode (RGXFWIF_ISP_SCHEDMODE_* values) */
+#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT)
+#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\
+ RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP)
+#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31)
+/* NOTE(review): ALL mask clears bits 10/11 but includes bit 15 despite the
+ * "15 unused" comment above — confirm against the FW that this is intended. */
+#define RGXFWIF_INICFG_ALL (0xFFFFF3FFU)
+
+/* Extended Flag definitions affecting the firmware globally */
+#define RGXFWIF_INICFG_EXT_ALL (0x0U)
+
+/* Mask to clear the system-level context switch controls */
+#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+/* Flag definitions affecting only workloads submitted by a particular OS */
+
+/*!
+ * @AddToGroup ContextSwitching
+ * @{
+ * @Name Per-OS DM context switch configuration flags
+ * @{
+ */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */
+#define RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN (IMG_UINT32_C(0x1) << 4) /*!< Enables RDM context switch */
+
+/* Low-priority context switch flags, one per DM (bits 5..9) */
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 5)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 6)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 7)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 8)
+#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM (IMG_UINT32_C(0x1) << 9)
+
+/* All per-OS flags (bits 0..9) */
+#define RGXFWIF_INICFG_OS_ALL (0x3FFU)
+
+#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \
+ RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \
+ RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \
+ RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN | \
+ RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN)
+
+#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL)
+
+/*!
+ * @} End of Per-OS Context switch configuration flags
+ * @} End of AddToGroup ContextSwitching
+ */
+
+/* Filtering configuration flags */
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1)
+
+/* Active Power Management configuration */
+typedef enum
+{
+ RGX_ACTIVEPM_FORCE_OFF = 0, /*!< APM forced off */
+ RGX_ACTIVEPM_FORCE_ON = 1, /*!< APM forced on */
+ RGX_ACTIVEPM_DEFAULT = 2 /*!< Use the default APM setting */
+} RGX_ACTIVEPM_CONF;
+
+/* Rascal+Dust power island configuration */
+typedef enum
+{
+ RGX_RD_POWER_ISLAND_FORCE_OFF = 0, /*!< Power islanding forced off */
+ RGX_RD_POWER_ISLAND_FORCE_ON = 1, /*!< Power islanding forced on */
+ RGX_RD_POWER_ISLAND_DEFAULT = 2 /*!< Use the default islanding setting */
+} RGX_RD_POWER_ISLAND_CONF;
+
+/* Entry describing a (possibly indirect) register for FW register lists */
+typedef struct
+{
+ IMG_UINT16 ui16RegNum; /*!< Register number */
+ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */
+ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */
+ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+/* Doubly-linked list node: a real DLLIST_NODE in FW builds, or a pair of
+ * FW virtual addresses (prev/next) on the host side */
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE RGXFWIF_DLLIST_NODE;
+#else
+typedef struct {RGXFWIF_DEV_VIRTADDR p;
+ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE;
+#endif
+
+/* Typed aliases of RGXFWIF_DEV_VIRTADDR: FW-virtual-address handles to the
+ * shared structures exchanged between the host driver and the firmware. */
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA;
+#if defined(SUPPORT_TBI_INTERFACE)
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF;
+#endif
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
+
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+/*!
+ * @Brief Buffer to store KM active client contexts
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+} RGXFWIF_ACTIVE_CONTEXT_BUF_DATA;
+#endif
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+ * This number is used to represent an unallocated set of page catalog base registers
+ */
+#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU
+
+/*!
+ * Firmware memory context.
+ */
+typedef struct
+{
+ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
+ IMG_UINT32 uiPageCatBaseRegSet; /*!< index of the associated set of page catalog base registers (RGXFW_BIF_INVALID_PCSET == unallocated) */
+ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */
+ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */
+ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
+ IMG_UINT64 RGXFW_ALIGN ui64FBCStateIDMask; /*!< FBCDC state descriptor IDs (non-zero means defer on mem context activation) */
+ IMG_UINT64 RGXFW_ALIGN ui64SpillAddr; /*!< Spill buffer address — NOTE(review): confirm purpose against FW usage */
+ IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */
+
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
+ IMG_UINT32 ui32OSid; /*!< Custom OSID emitted for this memory context */
+ IMG_BOOL bOSidAxiProt; /*!< AXI protection setting for the OSID */
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+/*!
+ * FW context state flags
+ */
+#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
+#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000002U)
+#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware GEOM/TA context suspend state (per GEOM core)
+ */
+typedef struct
+{
+ /* Register snapshot captured on context store — NOTE(review): individual
+ * register semantics are defined by the HW register headers, not visible here */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_CMD0;
+ IMG_UINT32 uTAReg_DCE_CMD1;
+ IMG_UINT32 uTAReg_DCE_WRITE;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW0;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW1;
+ IMG_UINT64 ui64EnabledUnitsMask; /*!< Mask of enabled units at suspend time */
+ IMG_UINT32 uTAReg_GTA_SO_PRIM[4];
+ IMG_UINT16 ui16TACurrentIdx;
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM;
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware GEOM/TA context suspend states for all GEOM cores
+ */
+typedef struct
+{
+ /*! FW-accessible TA state which must be written out to memory on context store */
+ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES];
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+/* The following defines need to be auto generated using the HW defines
+ * rather than hard coding it */
+#define RGXFWIF_ISP_PIPE_COUNT_MAX (48)
+#define RGXFWIF_PIPE_COUNT_PER_ISP (2)
+#define RGXFWIF_IPP_RESUME_REG_COUNT (1)
+
+/* Derived ISP/pipe counts; compile-time checked against the maximum above */
+#if !defined(__KERNEL__)
+#define RGXFWIF_ISP_COUNT (RGX_FEATURE_NUM_SPU * RGX_FEATURE_NUM_ISP_PER_SPU)
+#define RGXFWIF_ISP_PIPE_COUNT (RGXFWIF_ISP_COUNT * RGXFWIF_PIPE_COUNT_PER_ISP)
+#if RGXFWIF_ISP_PIPE_COUNT > RGXFWIF_ISP_PIPE_COUNT_MAX
+#error RGXFWIF_ISP_PIPE_COUNT should not be greater than RGXFWIF_ISP_PIPE_COUNT_MAX
+#endif
+#endif /* !defined(__KERNEL__) */
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Firmware FRAG/3D context suspend state
+ */
+typedef struct
+{
+#if defined(PM_INTERACTIVE_MODE)
+ IMG_UINT32 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< Managed by PM HW in the non-interactive mode */
+#endif
+ IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */
+
+ /* FW-accessible ISP state which must be written out to memory on context store */
+ /* au3DReg_ISP_STORE should be the last element of the structure
+ * as this is an array whose size is determined at runtime
+ * after detecting the RGX core */
+ IMG_UINT64 RGXFW_ALIGN au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+/* Values for the target-buffer bit of RGXFWIF_COMPUTECTX_STATE::ui32CtxStateFlags */
+#define RGXFWIF_CTX_USING_BUFFER_A (0)
+#define RGXFWIF_CTX_USING_BUFFER_B (1U)
+
+/*! @Brief Firmware compute context suspend state */
+typedef struct
+{
+ IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */
+ IMG_UINT64 ui64EnabledUnitsMask; /*!< presumably mask of enabled scalable units - confirm against FW usage */
+} RGXFWIF_COMPUTECTX_STATE;
+
+/* Bits for RGXFWIF_FWCOMMONCONTEXT::ui32MiscFlags */
+#define RGXFWIF_CONTEXT_MISC_FLAGS_STATS_PENDING (1U << 0)
+#define RGXFWIF_CONTEXT_MISC_FLAGS_HAS_DEFER_COUNT (1U << 1)
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware Common Context (or FWCC)
+ */
+typedef struct RGXFWIF_FWCOMMONCONTEXT_
+{
+ /* CCB details for this firmware context */
+ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */
+ PRGXFWIF_CCCB psCCB; /*!< CCB base */
+ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr;
+
+ /* Context suspend state */
+ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */
+
+ /* Flags e.g. for context switching */
+ IMG_UINT32 ui32FWComCtxFlags;
+ IMG_INT32 i32Priority; /*!< Priority level */
+ IMG_UINT32 ui32PrioritySeqNum;
+
+ /* Framework state */
+ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */
+
+ /* Misc and compatibility flags */
+ IMG_UINT32 ui32MiscFlags; /*!< RGXFWIF_CONTEXT_MISC_FLAGS_* bitfield */
+
+ /* Statistic updates waiting to be passed back to the host... */
+ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */
+ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */
+ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */
+ RGXFWIF_DM eDM; /*!< Data Master type */
+ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */
+ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */
+
+ IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress;
+ IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */
+ bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */
+
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */
+ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */
+
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+
+ /* References to the host side originators */
+ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */
+ IMG_UINT32 ui32PID; /*!< associated process ID */
+
+ IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */
+
+ IMG_UINT32 ui32PipelinedKicks; /*!< Number of kick from this CCB currently submitted to the DM pipeline */
+ IMG_CHAR szProcName[RGXFW_PROCESS_NAME_LEN]; /*!< User process name */
+
+ IMG_UINT32 ui32DeferCount; /*!< Number of context defers before forced scheduling of context */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/* Guard against layout growth: FW-side assumptions depend on this bound. */
+static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256,
+ "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size.");
+
+/* Per-core TRP checksum storage; trailing dimension differs per DM type. */
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2];
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4];
+typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][4];
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware render context.
+ */
+typedef struct
+{
+ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */
+ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */
+
+ RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState;
+
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */
+
+#if defined(SUPPORT_TRP)
+ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; /*!< Used by Firmware to store checksums during 3D WRR */
+ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; /*!< Used by Firmware to store checksums during TA WRR */
+ RGXFWIF_DM eTRPGeomCoreAffinity; /*!< Represents the DM affinity for pending 2nd TRP pass of GEOM, otherwise points to RGXFWIF_DM_MAX. */
+#endif
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+ Firmware compute context.
+*/
+typedef struct
+{
+ RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */
+
+ RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState;
+
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+ IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */
+
+ IMG_UINT32 ui32WGPState; /*!< WGP (workgroup protection) state - TODO confirm semantics */
+ IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; /*!< Per-core WGP checksums */
+} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT;
+
+/*!
+ Firmware ray context.
+*/
+typedef struct
+{
+ RGXFWIF_FWCOMMONCONTEXT sRDMContext; /*!< Firmware context for the RDM */
+ RGXFWIF_STATIC_RAYCONTEXT_STATE sStaticRayContextState;
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT;
+
+/*!
+ * @InGroup WorkloadContexts
+ * @Brief Firmware TDM context.
+ */
+typedef struct
+{
+ RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */
+
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+#if defined(SUPPORT_TRP)
+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */
+ RGXFWIF_TRP_CHECKSUM_2D RGXFW_ALIGN aui64TRPChecksums2D; /*!< Used by Firmware to store checksums during TDM WRR */
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT;
+
+/*!
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ *****************************************************************************/
+
+/* CMD_TYPE 32bit contains:
+ * 31:16 Reserved for magic value to detect corruption (16 bits)
+ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit)
+ * 14:0 Bits available for CMD_TYPEs (15 bits) */
+
+
+/* Magic value to detect corruption */
+#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC)
+#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
+#define RGX_CMD_MAGIC_DWORD_SHIFT (16U)
+/* Magic pre-shifted into bits 31:16 so it can be OR'd into each CMD_TYPE enumerator */
+#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT)
+
+/*!
+ * @InGroup KCCBTypes ClientCCBTypes
+ * @Brief Generic CCB control structure
+ */
+typedef struct
+{
+ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */
+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
+
+/*!
+ * @Defgroup KCCBTypes Kernel CCB data interface
+ * @Brief Types grouping data structures and defines used in realising the KCCB functionality
+ * @{
+ */
+
+/* Bits for RGXFWIF_MMUCACHEDATA::ui32CacheFlags; values mirror MMU_CTRL_INVAL fields */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command
+ */
+typedef struct
+{
+ IMG_UINT32 ui32CacheFlags; /*!< RGXFWIF_MMUCACHEDATA_FLAGS_* bitfield */
+ RGXFWIF_DEV_VIRTADDR sMMUCacheSync; /*!< FW address of the sync object to update on completion */
+ IMG_UINT32 ui32MMUCacheSyncUpdateValue; /*!< Value written to the sync object when done */
+} RGXFWIF_MMUCACHEDATA;
+
+/* Bits for RGXFWIF_BPDATA::ui32BPDataFlags */
+#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0)
+#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1)
+#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2)
+#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3)
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_BP (breakpoint) type command */
+typedef struct
+{
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */
+ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */
+ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */
+ IMG_UINT32 ui32BPDataFlags; /*!< RGXFWIF_BPDATA_FLAGS_* bitfield */
+ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */
+ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */
+ IMG_UINT64 RGXFW_ALIGN ui64SpillAddr;
+ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */
+ IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */
+ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */
+ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+#endif
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+/*!
+ * @Brief Command data for @Ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command
+ */
+typedef struct
+{
+ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; /*!< GEOM DM kick command data */
+ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; /*!< FRAG DM kick command data */
+} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */
+} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA;
+
+/*!
+ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command
+ */
+typedef enum
+{
+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */
+ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */
+ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */
+ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */
+} RGXFWIF_CLEANUP_TYPE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command
+ */
+typedef struct
+{
+ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type; selects the active union member below */
+ union {
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */
+ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */
+ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */
+ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */
+ } uCleanupData;
+} RGXFWIF_CLEANUP_REQUEST;
+
+/*!
+ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command
+ */
+typedef enum
+{
+ RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */
+ RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */
+ RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */
+ RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */
+} RGXFWIF_POWER_TYPE;
+
+/*!
+ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request
+ */
+typedef enum
+{
+ RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */
+ RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */
+ RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */
+} RGXFWIF_POWER_FORCE_IDLE_TYPE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command
+ */
+typedef struct
+{
+ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request; selects the active union member below */
+ union
+ {
+ /* Used with RGXFWIF_POW_NUM_UNITS_CHANGE (anonymous struct) */
+ struct
+ {
+ IMG_UINT32 ui32PowUnitsStateMask; /*!< New power units state mask */
+ IMG_UINT32 ui32RACStateMask; /*!< New RAC state mask */
+ };
+ IMG_BOOL bForced; /*!< If the operation is mandatory */
+ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */
+ } uPowerReqData;
+} RGXFWIF_POWER_REQUEST;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */
+ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */
+ IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */
+ IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+/*! @Brief HWPerf configuration operation codes */
+typedef enum
+{
+ RGXFWIF_HWPERF_CTRL_TOGGLE = 0, /*!< Toggle the masked events */
+ RGXFWIF_HWPERF_CTRL_SET = 1, /*!< Set the masked events */
+ RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 /*!< Emit a features event packet */
+} RGXFWIF_HWPERF_UPDATE_CONFIG;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command
+ */
+typedef struct
+{
+ RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */
+ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+/*! @Brief HWPerf counter-block control operations */
+typedef enum
+{
+ RGXFWIF_HWPERF_CNTR_NOOP = 0, /* No-Op */
+ RGXFWIF_HWPERF_CNTR_ENABLE = 1, /* Enable Counters */
+ RGXFWIF_HWPERF_CNTR_DISABLE = 2 /* Disable Counters */
+} RGXFWIF_HWPERF_CNTR_CONFIG;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS type command
+ */
+typedef struct
+{
+ IMG_UINT32 ui32CtrlWord;
+ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command
+ */
+typedef struct
+{
+ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command
+ */
+typedef struct
+{
+ bool bEnable;
+ IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
+ IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands
+ */
+typedef struct
+{
+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
+ IMG_BOOL bDone; /*!< action backing/unbacking succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+#if defined(SUPPORT_VALIDATION)
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_RGXREG (validation-only register access) */
+typedef struct
+{
+ IMG_UINT32 ui32RegWidth; /*!< Register width in bits - TODO confirm units */
+ IMG_BOOL bWriteOp; /*!< TRUE for write, FALSE for read */
+ IMG_UINT32 ui32RegAddr; /*!< Register offset to access */
+ IMG_UINT64 RGXFW_ALIGN ui64RegVal; /*!< Value to write (write op) */
+} RGXFWIF_RGXREG_DATA;
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_GPUMAP (validation-only FW GPU mapping) */
+typedef struct
+{
+ IMG_UINT64 ui64BaseAddress; /*!< Base address of the mapping */
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Associated firmware context */
+ IMG_UINT32 ui32Size; /*!< Size of the mapping */
+} RGXFWIF_GPUMAP_DATA;
+#endif
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command
+ */
+typedef struct
+{
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
+ IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */
+ IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */
+ IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */
+} RGXFWIF_FREELIST_GS_DATA;
+
+/* x2 accounts for local and global freelists per context - TODO confirm */
+#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U)
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command
+ */
+typedef struct
+{
+ IMG_UINT32 ui32FreelistsCount; /*!< Number of valid entries in aui32FreelistIDs */
+ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
+ */
+typedef struct
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to that may need to be resumed following write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+/*! @Brief A single operating performance point (voltage/frequency pair) */
+typedef struct
+{
+ IMG_UINT32 ui32Volt; /* V */
+ IMG_UINT32 ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+/*! @Brief Table of OPPs with the currently permitted min/max points */
+typedef struct
+{
+ PDVFS_OPP asOPPValues[NUM_OPP_VALUES];
+#if defined(DEBUG)
+ IMG_UINT32 ui32MinOPPPoint;
+#endif
+ IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ */
+typedef struct
+{
+ IMG_UINT32 ui32MaxOPPPoint; /*!< Index of the new maximum OPP point */
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ */
+typedef struct
+{
+ IMG_UINT32 ui32MinOPPPoint; /*!< Index of the new minimum OPP point */
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+/*! @Brief Sub-commands of \ref RGXFWIF_KCCB_CMD_REGCONFIG */
+typedef enum
+{
+ RGXFWIF_REGCFG_CMD_ADD = 101,
+ RGXFWIF_REGCFG_CMD_CLEAR = 102,
+ RGXFWIF_REGCFG_CMD_ENABLE = 103,
+ RGXFWIF_REGCFG_CMD_DISABLE = 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+/* Event at which a register configuration record is applied */
+typedef IMG_UINT32 RGXFWIF_REG_CFG_TYPE;
+#define RGXFWIF_REG_CFG_TYPE_PWR_ON 0U /* Sidekick power event */
+#define RGXFWIF_REG_CFG_TYPE_DUST_CHANGE 1U /* Rascal / dust power event */
+#define RGXFWIF_REG_CFG_TYPE_TA 2U /* TA kick */
+#define RGXFWIF_REG_CFG_TYPE_3D 3U /* 3D kick */
+#define RGXFWIF_REG_CFG_TYPE_CDM 4U /* Compute kick */
+#define RGXFWIF_REG_CFG_TYPE_TDM 5U /* TDM kick */
+#define RGXFWIF_REG_CFG_TYPE_ALL 6U /* Applies to all types. Keep as last element */
+
+/*! @Brief One register configuration record: masked write of ui64Value to ui64Addr */
+typedef struct
+{
+ IMG_UINT64 ui64Addr;
+ IMG_UINT64 ui64Mask;
+ IMG_UINT64 ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_REGCONFIG */
+typedef struct
+{
+ RGXFWIF_REGDATA_CMD_TYPE eCmdType;
+ RGXFWIF_REG_CFG_TYPE eRegConfigType;
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+/*! @Brief FW-resident table of register configuration records, bucketed by type */
+typedef struct
+{
+ /**
+ * PDump WRW command write granularity is 32 bits.
+ * Add padding to ensure array size is 32 bit granular.
+ */
+ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))];
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+/*! @Brief Online state of a (guest) OS, used with \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */
+typedef enum
+{
+ RGXFWIF_OS_ONLINE = 1,
+ RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+/*!
+ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command
+ */
+typedef struct
+{
+ IMG_UINT32 ui32DriverID; /*!< Driver/OSID whose state is changing */
+ RGXFWIF_OS_STATE_CHANGE eNewOSState; /*!< New online/offline state */
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+/*! @Brief Power-counter dump control requests */
+typedef enum
+{
+ RGXFWIF_PWR_COUNTER_DUMP_START = 1,
+ RGXFWIF_PWR_COUNTER_DUMP_STOP,
+ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
+} RGXFWIF_COUNTER_DUMP_REQUEST;
+
+/*! @Brief Command data for \ref RGXFWIF_KCCB_CMD_COUNTER_DUMP type command */
+typedef struct
+{
+ RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest;
+} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA;
+
+/*!
+ * @Brief List of command types supported by the Kernel CCB
+ *
+ * Each enumerator carries RGX_CMD_MAGIC_DWORD in its top 16 bits for
+ * corruption detection (see the CMD_TYPE layout description above).
+ */
+typedef enum
+{
+ /* Common commands */
+ RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */
+ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */
+ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */
+ RGXFWIF_KCCB_CMD_CLEANUP = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
+ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
+ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
+ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
+ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 113U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */
+ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */
+
+ /* Commands only permitted to the native or host OS */
+ RGXFWIF_KCCB_CMD_POW = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request (type specified in the command data) */
+ RGXFWIF_KCCB_CMD_REGCONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */
+ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the priority/group for a particular OSID. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */
+ RGXFWIF_KCCB_CMD_PHR_CFG = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_KCCB_CMD_RGXREG = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */
+#endif
+ RGXFWIF_KCCB_CMD_WDG_CFG = 210U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */
+ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 211U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_KCCB_CMD_GPUMAP = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */
+#endif
+
+ /* HWPerf commands */
+ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 300U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 301U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
+ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 302U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 303U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */
+
+} RGXFWIF_KCCB_CMD_TYPE;
+
+/* Guests may only issue the "common" commands (< RGXFWIF_KCCB_CMD_POW) */
+#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_POW - 1)
+
+/*! @Brief Kernel CCB command packet */
+typedef struct
+{
+ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type; selects the active uCmdData member */
+ IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */
+
+ /* NOTE: Make sure that uCmdData is the last member of this struct
+ * This is to calculate actual command size for device mem copy.
+ * (Refer RGXGetCmdMemCopySize())
+ * */
+ union
+ {
+ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */
+ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */
+ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */
+ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */
+ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */
+ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */
+ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */
+ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */
+ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */
+ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */
+ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */
+ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */
+ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */
+ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */
+ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */
+ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+#if defined(SUPPORT_PDVFS)
+ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; /*!< Data for setting the max frequency/OPP */
+ RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */
+#endif
+ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */
+ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */
+ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */
+ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */
+ RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */
+#endif
+ } UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+/* Compile-time check that the packet size matches FW expectations */
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
+
+/*!
+ * @Defgroup FWCCBTypes Firmware CCB data interface
+ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality
+ * @{
+ */
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the
+ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands
+ *****************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB
+ * command
+ *****************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32FreelistID; /*!< Freelist ID */
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */
+ IMG_UINT32 ui32HwrCounter; /*!< HWR counter */
+ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */
+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */
+ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */
+ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */
+ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */
+ IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION
+ * Firmware CCB command
+ *****************************************************************************/
+typedef struct
+{
+ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */
+} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA;
+
+/*!
+ ******************************************************************************
+ * List of command types supported by the Firmware CCB
+ *****************************************************************************/
+typedef enum
+{
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages
+ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked
+ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */
+ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow
+ \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction
+ \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context
+ \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */
+ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump
+ \n Command data: None */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats
+ \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */
+#if defined(SUPPORT_PDVFS)
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+#endif
+ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart
+ \n Command data: None */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+#if defined(SUPPORT_SOC_TIMER)
+ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED,
+#endif
+#endif
+ RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault
+ \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+/*!
+ ******************************************************************************
+ * List of the various stats of the process to update/increment.
+ * Carried in RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA for the
+ * RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB command.
+ *****************************************************************************/
+typedef enum
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+/*!
+ ******************************************************************************
+ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
+ * command
+ *****************************************************************************/
+typedef struct
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
+ IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
+ IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+
+/*! Command data of the RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE Firmware CCB command */
+typedef struct
+{
+ IMG_UINT32 ui32CoreClkRate; /*!< New core clock rate */
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+#if defined(SUPPORT_VALIDATION)
+/*! Command data of the RGXFWIF_FWCCB_CMD_REG_READ Firmware CCB command */
+typedef struct
+{
+ IMG_UINT64 ui64RegValue; /*!< Register value returned by the firmware */
+} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
+
+#if defined(SUPPORT_SOC_TIMER)
+/*! Command data of the RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS Firmware CCB command */
+typedef struct
+{
+ IMG_UINT64 ui64timerGray; /*!< SoC timer sample, gray-coded - NOTE(review): assumed from name, confirm */
+ IMG_UINT64 ui64timerBinary; /*!< SoC timer sample, binary - NOTE(review): assumed from name, confirm */
+ IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS]; /*!< Per-cluster USC timer samples */
+} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
+#endif
+#endif
+
+/*!
+ ******************************************************************************
+ * @Brief Firmware CCB command structure.
+ * One entry of the Firmware CCB: a command type plus a union of the
+ * per-command payloads listed above.
+ *****************************************************************************/
+typedef struct
+{
+ RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
+ IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */
+
+ union
+ {
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/
+ RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; /*!< Data for core clock rate change */
+ RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for firmware page fault notification */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData; /*!< Data for register read (validation builds) */
+#if defined(SUPPORT_SOC_TIMER)
+ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers; /*!< Data for SoC timer samples (validation builds) */
+#endif
+#endif
+ } RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+/* Compile-time check that host and FW agree on the structure size */
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*! @} End of FWCCBTypes */
+
+/*!
+ ******************************************************************************
+ * Workload estimation Firmware CCB command structure for RGX.
+ * Reports the measured cost of a completed workload back to the host.
+ *****************************************************************************/
+typedef struct
+{
+ IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */
+ IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+/*!
+ * @Defgroup ClientCCBTypes Client CCB data interface
+ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality
+ * @{
+ */
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+ (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared
+ between the host and meta that contains 64-bit variables has to maintain
+ this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64)
+
+/* Bit set in a client CCB command type to mark it as a task (DM workload) command */
+#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15)
+/* Round an allocation size up to RGXFWIF_FWALLOC_ALIGN */
+#define RGX_CCB_FWALLOC_ALIGN(size) PVR_ALIGN(size, RGXFWIF_FWALLOC_ALIGN)
+
+/*! Client CCB command type; takes one of the RGXFWIF_CCB_CMD_TYPE_* values below */
+typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE;
+
+/*!
+ * @Name Client CCB command types
+ * Values carry RGX_CMD_MAGIC_DWORD_SHIFTED for validation; task (DM workload)
+ * commands additionally carry the RGX_CCB_TYPE_TASK bit.
+ * @{
+ */
+#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */
+#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */
+#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */
+#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */
+#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< FBSC invalidate command */
+#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */
+#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Timestamp taken before the DM command (see POST_TIMESTAMP note below) */
+#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Null command - NOTE(review): semantics not visible here, confirm */
+#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Abort command - NOTE(review): semantics not visible here, confirm */
+
+/* Leave a gap between CCB specific commands and generic commands */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */
+#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */
+#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */
+#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */
+#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */
+/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The
+ padding code with the CCB wrap upsets the FW if we don't have the task type
+ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+*/
+#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Timestamp taken after the DM command (task bit deliberately clear) */
+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */
+#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */
+
+#if defined(SUPPORT_VALIDATION)
+#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Register read command (validation builds only) */
+#endif
+
+#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */
+#define RGXFWIF_CCB_CMD_TYPE_RAY (222U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Ray DM command - NOTE(review): assumed from name, confirm */
+#define RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP (223U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Process a vulkan timestamp */
+/*! @} End of Client CCB command types */
+
+
+/* TRP signature-checking status values; presumably reported by the FW via the
+ * scratch register aliased below - TODO confirm against FW sources */
+#define RGXFWIF_TRP_STATUS_UNKNOWN 0x000U
+#define RGXFWIF_TRP_STATUS_CHECKSUMS_OK 0x001U
+#define RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR 0x002U
+
+/* Register used to communicate the TRP signature status */
+#define RGXFWIF_CR_TRP_SIGNATURE_STATUS (RGX_CR_SCRATCH10)
+
+
+/*! Workload estimation data supplied with a kick (host to firmware).
+ * The prediction/deadline pair describes the expected cost of the workload;
+ * results come back via RGXFWIF_WORKEST_FWCCB_CMD using the same index. */
+typedef struct
+{
+ /* Index for the KM Workload estimation return data array */
+ IMG_UINT16 RGXFW_ALIGN ui16ReturnDataIndex;
+ /* Predicted time taken to do the work in cycles */
+ IMG_UINT32 RGXFW_ALIGN ui32CyclesPrediction;
+ /* Deadline for the workload (in usecs) */
+ IMG_UINT64 RGXFW_ALIGN ui64Deadline;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+/*! @Brief Command header of a command in the client CCB buffer.
+ *
+ * Followed by this header is the command-data specific to the
+ * command-type as specified in the header.
+ */
+typedef struct
+{
+ RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */
+ IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */
+ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+#endif
+} RGXFWIF_CCB_CMD_HEADER;
+
+/*
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+
+/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */
+typedef struct
+{
+ IMG_INT32 i32Priority; /*!< Priority level */
+} RGXFWIF_CMD_PRIORITY;
+
+/*! @} End of ClientCCBTypes */
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct
+{
+ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */
+ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+/*! Control structure for the firmware counter dump buffer */
+typedef struct
+{
+ PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */
+ IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */
+} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL;
+
+/*! Control structure for the firmware gcov (code coverage) buffer */
+typedef struct
+{
+ PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */
+ IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */
+} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL;
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC is a subject of change,
+ following define should be increased by 1 to indicate to compatibility logic,
+ that layout has changed */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3
+
+/*! BVNC (GPU version identifier) plus layout version, compared between host and FW */
+typedef struct
+{
+ IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */
+ IMG_UINT64 RGXFW_ALIGN ui64BVNC;
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+/*! Initialisation options exchanged for compatibility checking */
+typedef struct
+{
+ IMG_UINT8 ui8OsCountSupport; /*!< Number of OSes supported - TODO confirm semantics */
+} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS;
+
+/* Declare and initialise a RGXFWIF_COMPCHECKS_BVNC with the current layout version */
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+ RGXFWIF_COMPCHECKS_BVNC (name) = { \
+ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+ 0, \
+ }
+/* (Re)initialise an existing RGXFWIF_COMPCHECKS_BVNC instance */
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+ do { \
+ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+ (name).ui64BVNC = 0; \
+ } while (0)
+
+/*! Full set of compatibility data populated by the Firmware and checked by the host */
+typedef struct
+{
+ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */
+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */
+ IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */
+ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */
+ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */
+ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */
+ RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */
+ IMG_BOOL bUpdated; /*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+/*! @Brief Firmware Runtime configuration data \ref RGXFWIF_RUNTIME_CFG
+ * allocated by services and used by the Firmware on boot
+ **/
+typedef struct
+{
+ IMG_UINT32 ui32ActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */
+ IMG_UINT32 ui32RuntimeCfgFlags; /*!< Compatibility and other flags */
+ IMG_BOOL bActivePMLatencyPersistant; /*!< If set, APM latency does not reset to system default each GPU power transition */
+ IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed, currently only used to calculate timer ticks */
+ IMG_UINT32 ui32PowUnitsStateMask; /*!< Power Unit state mask set by the host */
+ IMG_UINT32 ui32RACStateMask; /*!< RAC state mask set by the host */
+ IMG_UINT32 ui32PHRMode; /*!< Periodic Hardware Reset configuration values */
+ IMG_UINT32 ui32HCSDeadlineMS; /*!< New number of milliseconds C/S is allowed to last */
+ IMG_UINT32 ui32WdgPeriodUs; /*!< The watchdog period in microseconds */
+ IMG_UINT32 aui32DriverPriority[RGXFW_MAX_NUM_OSIDS]; /*!< Array of priorities per OS */
+ IMG_UINT32 aui32DriverIsolationGroup[RGXFW_MAX_NUM_OSIDS]; /*!< Array of isolation groups per OS */
+
+ PRGXFWIF_HWPERFBUF sHWPerfBuf; /*!< On-demand allocated HWPerf buffer address, to be passed to the FW */
+ RGXFWIF_DMA_ADDR sHWPerfDMABuf; /*!< DMA address of the HWPerf buffer */
+ RGXFWIF_DMA_ADDR sHWPerfCtlDMABuf; /*!< DMA address of the HWPerf control structure */
+#if defined(SUPPORT_VALIDATION)
+ IMG_BOOL bInjectFWFault; /*!< Injecting firmware fault to validate recovery through Host */
+#endif
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+/*! Sentinel meaning "no limit" for the HWR debug dump (see ui32HWRDebugDumpLimit) */
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U)
+
+#if defined(PDUMP)
+
+/*! Maximum number of PID/OS pairs in the PDump PID filter */
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U
+
+/*! Whether the filter list is an exclude-list or an include-list */
+typedef enum
+{
+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+/*! A single PID filter entry: process ID plus the driver (OS) it belongs to */
+typedef struct
+{
+ IMG_PID uiPID;
+ IMG_UINT32 ui32DriverID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct
+{
+ RGXFWIF_PID_FILTER_MODE eMode;
+ /* each process in the filter list is specified by a PID and OS ID pair.
+ * each PID and OS pair is an item in the items array (asItems).
+ * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries
+ * then it must be terminated by an item with pid of zero.
+ */
+ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+/* Security access test flags (validation builds), see ui32SecurityTestFlags in RGXFWIF_SYSINIT */
+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0)
+#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1)
+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2)
+#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3)
+#endif
+
+/*! USRM data master indices, used to size aui32USRMNumRegions - DM acronyms
+ * per the hardware naming (VDM/DDM/CDM/PDM/TDM) */
+typedef enum
+{
+ RGXFWIF_USRM_DM_VDM = 0,
+ RGXFWIF_USRM_DM_DDM = 1,
+ RGXFWIF_USRM_DM_CDM = 2,
+ RGXFWIF_USRM_DM_PDM = 3,
+ RGXFWIF_USRM_DM_TDM = 4,
+ RGXFWIF_USRM_DM_LAST
+} RGXFWIF_USRM_DM;
+
+/*! UVBRM data master indices, used to size aui64UVBRMNumRegions */
+typedef enum
+{
+ RGXFWIF_UVBRM_DM_VDM = 0,
+ RGXFWIF_UVBRM_DM_DDM = 1,
+ RGXFWIF_UVBRM_DM_LAST
+} RGXFWIF_UVBRM_DM;
+
+/*! TPU data master indices, used to size aui32TPUTrilinearFracMask */
+typedef enum
+{
+ RGXFWIF_TPU_DM_PDM = 0,
+ RGXFWIF_TPU_DM_VDM = 1,
+ RGXFWIF_TPU_DM_CDM = 2,
+ RGXFWIF_TPU_DM_TDM = 3,
+ RGXFWIF_TPU_DM_RDM = 4,
+ RGXFWIF_TPU_DM_LAST
+} RGXFWIF_TPU_DM;
+
+/*! GPIO validation test modes (see eGPIOValidationMode in RGXFWIF_SYSINIT) */
+typedef enum
+{
+ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */
+ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that
+ initiates by sending data via the
+ GPIO and then sends back any data
+ received over the GPIO */
+ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes
+ and reads data across the entire
+ GPIO AP address range.*/
+#if defined(SUPPORT_STRIP_RENDERING)
+ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/
+ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/
+#endif
+ RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */
+ RGXFWIF_GPIO_VAL_LOOPBACK = 6, /*!< Send and then receive each byte
+ in the range 0-255. */
+ RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2
+ byte in the range 0-255. */
+ RGXFWIF_GPIO_VAL_LAST
+} RGXFWIF_GPIO_VAL_MODE;
+
+/*! Firmware performance counter configuration (see eFirmwarePerf in RGXFWIF_SYSINIT) */
+typedef enum
+{
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+/*! Firmware boot progress stages; negative values indicate failure/unavailability */
+typedef enum
+{
+ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2,
+ FW_BOOT_STAGE_NOT_AVAILABLE = -1,
+ FW_BOOT_NOT_STARTED = 0,
+ FW_BOOT_BLDR_STARTED = 1,
+ FW_BOOT_CACHE_DONE,
+ FW_BOOT_TLB_DONE,
+ FW_BOOT_MAIN_STARTED,
+ FW_BOOT_ALIGNCHECKS_DONE,
+ FW_BOOT_INIT_DONE,
+} FW_BOOT_STAGE;
+
+/*!
+ * @AddToGroup KCCBTypes
+ * @{
+ * @Name Kernel CCB return slot responses
+ * @{
+ * Usage of bit-fields instead of bare integers
+ * allows FW to possibly pack-in several responses for each single kCCB command.
+ */
+
+#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /*!< Command executed (return status from FW) */
+#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /*!< A cleanup was requested but resource busy */
+#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /*!< Poll failed in FW for a HW operation to complete */
+
+#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /*!< Reset value of a kCCB return slot (set by host) */
+/*!
+ * @} End of Name Kernel CCB return slot responses
+ * @} End of AddToGroup KCCBTypes
+ */
+
+/*! @Brief OS connection data \ref RGXFWIF_CONNECTION_CTL allocated
+ * by services and used to track OS state in Firmware and Services.
+ * Fields are volatile: both sides poll/update them concurrently.
+ **/
+typedef struct
+{
+ /* Fw-Os connection states */
+ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; /*!< Firmware-OS connection state */
+ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; /*!< Services-OS connection state */
+ volatile IMG_UINT32 ui32AliveFwToken; /*!< OS Alive token updated by Firmware */
+ volatile IMG_UINT32 ui32AliveOsToken; /*!< OS Alive token updated by Services */
+} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL;
+
+/*! @Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT
+ * allocated by services and used by the Firmware on boot
+ **/
+typedef struct
+{
+ /* Kernel CCB */
+ PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */
+ PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */
+ PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */
+
+ /* Firmware CCB */
+ PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */
+ PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */
+
+ /* Workload Estimation Firmware CCB */
+ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */
+ PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */
+
+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */
+
+ IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */
+
+ PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */
+
+ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */
+
+} UNCACHED_ALIGN RGXFWIF_OSINIT;
+
+/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT
+ * allocated by services and used by the Firmware on boot
+ **/
+typedef struct
+{
+ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; /*!< FBCDC large state table base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSIndirectHeapBase; /* Pixel Indirect State base */
+
+ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*!< Event filter for Firmware events */
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; /*!< Address to use as a fence when issuing SLC3_CFI */
+
+ IMG_UINT64 RGXFW_ALIGN aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; /*!< Per-DM UVBRM region counts */
+ IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; /*!< Per-DM TPU trilinear fraction masks */
+ IMG_UINT32 RGXFW_ALIGN aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; /*!< Per-DM USRM region counts */
+
+ IMG_UINT32 ui32FilterFlags; /*!< Filter flags - TODO confirm which filters these control */
+
+ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */
+#if defined(SUPPORT_VALIDATION)
+ RGXFWIF_SIGBUF_CTL asValidationSigBufCtl[RGXFWIF_DM_MAX]; /*!< Validation-build signature buffers */
+ IMG_UINT64 RGXFW_ALIGN ui64RCEDisableMask;
+ IMG_UINT32 RGXFW_ALIGN ui32PCGPktDropThresh;
+ IMG_UINT32 RGXFW_ALIGN ui32RaySLCMMUAutoCacheOps;
+#endif
+
+ PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */
+
+ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */
+ PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */
+#if defined(SUPPORT_TBI_INTERFACE)
+ PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */
+#endif
+
+ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */
+ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */
+ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/
+
+ RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; /*!< Counter dump buffer control */
+
+#if defined(SUPPORT_FIRMWARE_GCOV)
+ RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */
+#endif
+
+ RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */
+
+ IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */
+
+ IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */
+
+ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */
+
+ IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */
+
+ IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */
+
+ IMG_UINT32 ui32JonesDisableMask; /*!< Jones disable mask (see PVRSRV_APPHINT_JONESDISABLEMASK) */
+
+ RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */
+
+ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */
+
+#if defined(SUPPORT_PDVFS)
+ RGXFWIF_PDVFS_OPP RGXFW_ALIGN sPDVFSOPPInfo;
+
+ /**
+ * FW Pointer to memory containing core clock rate in Hz.
+ * Firmware (PDVFS) updates the memory when running on non primary FW thread
+ * to communicate to host driver.
+ */
+ PRGXFWIF_CORE_CLK_RATE RGXFW_ALIGN sCoreClockRate;
+#endif
+
+#if defined(PDUMP)
+ RGXFWIF_PID_FILTER sPIDFilter; /*!< PDump PID filter (see RGXFWIF_PID_FILTER) */
+#endif
+
+ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; /*!< GPIO validation test mode */
+
+ RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+ IMG_UINT32 ui32SecurityTestFlags; /*!< Bitmask of RGXFWIF_SECURE_ACCESS_TEST_* flags */
+ RGXFWIF_DEV_VIRTADDR pbSecureBuffer;
+ RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer;
+#endif
+
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ RGXFWIF_DEV_VIRTADDR sActiveContextBufBase; /*!< Active context buffer base */
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
+ /*
+ * Used when validation is enabled to allow the host to check
+ * that MTS sent the correct sideband in response to a kick
+ * from a given OSes schedule register.
+ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set
+ *
+ * Set by the host to:
+ * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT
+ * reset to 0 by FW when kicked by the given OSid
+ */
+ IMG_UINT32 ui32OSKickTest;
+#endif
+
+#if defined(SUPPORT_AUTOVZ)
+ IMG_UINT32 ui32VzWdgPeriod; /*!< Virtualization watchdog period */
+#endif
+
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ /* notify firmware power-up on host-side recovery */
+ IMG_BOOL bFwHostRecoveryMode;
+#endif
+
+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH)
+ RGXFWIF_DEV_VIRTADDR pbFwScratchBuf; /*!< FW scratch buffer for secure context switch */
+#endif
+} UNCACHED_ALIGN RGXFWIF_SYSINIT;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
+/* Layout of RGXFWIF_SYSINIT.ui32OSKickTest: enable bit plus OSID shifted above it */
+#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1
+#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1
+#endif
+
+/*!
+ *****************************************************************************
+ * Timer correlation shared data and defines
+ *****************************************************************************/
+
+/*! Snapshot correlating the OS timers with the GPU CR timer, used to convert
+ * CR timer deltas into OS time deltas (see macros and explanation below) */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; /*!< OS timestamp at correlation point */
+ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; /*!< OS monotonic timestamp at correlation point */
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; /*!< GPU CR timer value at correlation point */
+
+ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
+ * where the deltas are relative to the timestamps above:
+ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
+
+ IMG_UINT32 ui32CoreClockSpeed; /*!< Core clock speed at correlation point */
+ IMG_UINT32 ui32Reserved; /*!< Padding/reserved */
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* The following macros are used to help converting FW timestamps to the Host
+ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of
+ * time; it increments by 1 every 256 GPU clock ticks, so the general
+ * formula to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
+ * otherwise if (scale == 10^6) then deltaOS is in uS ]
+ *
+ * deltaCR * 256 256 * scale
+ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ]
+ * GPUclockspeed GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to the base
+ * OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and periodic
+ * frequency calibration (executed every few seconds if the FW is doing
+ * some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+/* Convert a CR timer delta to an OS time delta in nS (see explanation above) */
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+ (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+
+/* See rgx_common.h for a list of GPU states */
+/* A utilisation word packs a timestamp (upper bits) and a GPU state (lower bits) */
+#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead the real timer on the Host.
+ * In some cases we can perform subtractions between FW approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results if for instance the FW one is a bit ahead of time.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
+
+/* Pack a timestamp and a state into a single utilisation word */
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
+
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading
+ * an entry while the Host is updating it. With 2 entries in the worst case the FW
+ * will read old data, which is still quite ok if the Host is updating the timer
+ * correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
+ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
+
+/*! GPU utilisation control block shared between host and firmware */
+typedef struct
+{
+ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; /*!< Ring of timer correlation snapshots */
+ IMG_UINT32 ui32TimeCorrSeqCount; /*!< Sequence count selecting the current entry (see RGXFWIF_TIME_CORR_CURR_INDEX) */
+
+ /* Compatibility and other flags */
+ IMG_UINT32 ui32GpuUtilFlags;
+
+ /* Last GPU state + OS time of the last state update */
+ IMG_UINT64 RGXFW_ALIGN ui64GpuLastWord;
+ /* Counters for the amount of time the GPU was active/idle/blocked */
+ IMG_UINT64 RGXFW_ALIGN aui64GpuStatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+
+ /* Last GPU DM per-OS states + OS time of the last state update */
+ IMG_UINT64 RGXFW_ALIGN aaui64DMOSLastWord[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS];
+ /* Counters for the amount of time the GPU DMs were active/idle/blocked */
+ IMG_UINT64 RGXFW_ALIGN aaaui64DMOSStatsCounters[RGXFWIF_DM_MAX][RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+/*! Render Target Array (RTA) control data */
+typedef struct
+{
+ IMG_UINT32 ui32RenderTargetIndex; //Render number
+ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA
+ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs
+ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices
+ RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target
+ IMG_UINT32 ui32MaxRTs; //Number of render targets in the array
+ IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief Firmware Freelist holding usage state of the Parameter Buffers
+ */
+typedef struct
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Freelist page table base address */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Freelist state buffer base address */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Freelist base address at last grow */
+
+#if defined(PM_INTERACTIVE_MODE)
+ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */
+ IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */
+#endif
+
+ IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */
+ IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */
+ IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */
+#if defined(PM_INTERACTIVE_MODE)
+ IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */
+ IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/
+#endif
+#if defined(SUPPORT_SHADOW_FREELISTS)
+ IMG_UINT32 ui32HWRCounter; /*!< HW recovery counter for this freelist */
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< FW memory context owning the shadow freelist */
+#endif
+ IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */
+ IMG_BOOL bGrowPending; /*!< Freelist grow is pending */
+ IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */
+ IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */
+
+ IMG_BOOL bUpdatePending; /*!< A freelist update is pending */
+ IMG_UINT32 ui32UpdateNewPages; /*!< New page count to apply on update */
+ IMG_UINT32 ui32UpdateNewReadyPages; /*!< New ready page count to apply on update */
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+/*!
+ ******************************************************************************
+ * HWRTData
+ *****************************************************************************/
+
+/* HWRTData flags (ui32HWRTDataFlags) */
+/* Deprecated flags 1:0 */
+#define HWRTDATA_HAS_LAST_TA (1UL << 2)
+#define HWRTDATA_PARTIAL_RENDERED (1UL << 3)
+#define HWRTDATA_KILLED (1UL << 4)
+#define HWRTDATA_KILL_AFTER_TARESTART (1UL << 5)
+#if defined(SUPPORT_AGP)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 6)
+#if defined(SUPPORT_AGP4)
+#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 7)
+#endif
+#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 8)
+#endif
+
+/*! Workload processing state of a HWRTDATA (see eState in the HWRTDATA struct) */
+typedef enum
+{
+ RGXFWIF_RTDATA_STATE_NONE = 0,
+ RGXFWIF_RTDATA_STATE_KICKTA,
+ RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+ RGXFWIF_RTDATA_STATE_TAFINISHED,
+ RGXFWIF_RTDATA_STATE_KICK3D,
+ RGXFWIF_RTDATA_STATE_3DFINISHED,
+ RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED,
+ RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+ /* In case of HWR, we can't set the RTDATA state to NONE,
+ * as this will cause any TA to become a first TA.
+ * To ensure all related TA's are skipped, we use the HWR state */
+ RGXFWIF_RTDATA_STATE_HWR,
+ RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU
+} RGXFWIF_RTDATA_STATE;
+
+/*! Render target dimension dependent data shared by the HWRTDATA set
+ * (referenced via sHWRTDataCommonFwAddr) */
+typedef struct
+{
+ IMG_UINT32 ui32ScreenPixelMax;
+ IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl;
+ IMG_UINT32 ui32TEStride;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32RgnStride;
+ IMG_UINT32 ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ISPMergeUpperY;
+ IMG_UINT32 ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ISPMergeScaleY;
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief Firmware Render Target data i.e. HWRTDATA used to hold the PM context
+ */
+typedef struct
+{
+ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[2]; /*!< VCE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[2]; /*!< Last VCE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[2]; /*!< TE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[2]; /*!< Last TE Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */
+ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; /*!< Last Alist Page Catalogue base */
+
+#if defined(PM_INTERACTIVE_MODE)
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; /*!< Mlist table base */
+#else
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; /*!< Series8 PM State buffers */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; /*!< Secure PM State buffers */
+#endif
+
+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */
+ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; /*!< Freelist state snapshot used on HWR */
+ IMG_BOOL bRenderStateNeedsReset; /*!< PM render state requires a reset */
+
+ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */
+
+ IMG_UINT32 ui32HWRTDataFlags; /*!< HWRTDATA_* flag bits */
+ RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */
+
+ RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */
+#if defined(RGX_FIRMWARE)
+ struct RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; /*!< Owning geometry FW common context (FW build only) */
+#else
+ RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; /*!< Layout placeholder; not used by the host */
+#endif
+
+#if defined(PM_INTERACTIVE_MODE)
+ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */
+ IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */
+#endif
+#if defined(SUPPORT_TRP)
+ IMG_UINT32 ui32KickFlagsCopy; /*!< Copy of the kick flags (TRP builds) */
+ IMG_UINT32 ui32TRPState; /*!< Used by Firmware to track current state of a protected kick */
+#endif
+
+ RGXFWIF_CLEANUP_CTL RGXFW_ALIGN_DCACHEL sCleanupState; /*!< Render target clean up state */
+} RGXFW_ALIGN_DCACHEL RGXFWIF_HWRTDATA;
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct
+{
+ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */
+ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */
+} SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
+
+/* Size in bytes of the Firmware scratch buffer */
+#define RGXFW_SCRATCH_BUF_SIZE (32768U)
+
+/* Number of GPU cores provisioned for in the secure buffer sizing below */
+#define RGX_NUM_CORES (8U)
+
+/*
+ * Layout of the KM-reserved region of the General heap: consecutive
+ * sub-allocations, each described by an OFFSET (running sum of all preceding
+ * MAX_SIZEs, starting at RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET) and a
+ * MAX_SIZE. The *_VADDR macros translate the offsets into device virtual
+ * addresses within the General heap; where PVR_ALIGN is applied there, the
+ * corresponding MAX_SIZE already includes slack (e.g. +1023U, +127U) to
+ * absorb the alignment.
+ */
+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET
+#define RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U
+
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES 2048U
+
+#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES 8192U
+
+/* Per-core state-restore buffer: 16KiB per core plus header and alignment slack */
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES (0x00004000U * RGX_NUM_CORES + 48U + 1023U)
+
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES
+
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES (RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES)
+#define RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES (((0x00000080U + 127U) * RGX_NUM_CORES) + 127U)
+
+/* Total region size: sum of all MAX_SIZEs, rounded up to a whole page */
+#define RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES PVR_ALIGN((RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_MAX_SIZE_BYTES + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_MAX_SIZE_BYTES), PAGE_SIZE)
+
+#define RGXFWIF_TDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_TDM_SECURE_QUEUE_OFFSET_BYTES)
+#define RGXFWIF_CDM_SECURE_QUEUE_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_QUEUE_OFFSET_BYTES)
+#define RGXFWIF_3D_SECURE_IPP_BUF_VADDR (RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_3D_SECURE_IPP_BUF_OFFSET_BYTES)
+#define RGXFWIF_CDM_SECURE_SR_BUF_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_BUF_OFFSET_BYTES), 1024ULL)
+#define RGXFWIF_CDM_SECURE_SR_B_BUF_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_SR_B_BUF_OFFSET_BYTES), 1024ULL)
+#define RGXFWIF_CDM_SECURE_CONTEXT_STATE_VADDR PVR_ALIGN((RGX_GENERAL_HEAP_BASE + RGXFWIF_KM_GENERAL_HEAP_CDM_SECURE_CONTEXT_OFFSET_BYTES), 128ULL)
+
+/*!
+ ******************************************************************************
+ * Virtualisation and Security
+ *****************************************************************************/
+/* Hardware OSID used by the Firmware itself */
+#define FW_OSID (0U)
+#define MMU_CONTEXT_MAPPING_FWPRIV (0U) /* FW code/private data */
+
+/* First hardware OSID available to KM drivers; depends on how many OSIDs
+ * are reserved for the Firmware in each configuration. */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#if (RGX_NUM_DRIVERS_SUPPORTED == 1)
+/* native with security support: keep reverse compatibility with existing scheme */
+#define DRIVER_OSID_START_OFFSET (FW_OSID+2)
+#elif (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_DEDICATED_OSID)
+/* OSIDs 0 and 1 reserved for Firmware */
+#define DRIVER_OSID_START_OFFSET (FW_OSID+2)
+#else
+/* OSID 0 is reserved for the Firmware; drivers start at OSID 1 */
+#define DRIVER_OSID_START_OFFSET (FW_OSID+1)
+#endif
+
+#else
+/* Firmware and Host driver share the same OSID */
+#define DRIVER_OSID_START_OFFSET (FW_OSID)
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+/* OSID under which the Firmware heaps are allocated */
+#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_DEDICATED_OSID)
+/* Firmware heaps reside in a dedicated non-secure IPA space. */
+#define FW_HEAP_OSID (FW_OSID+1)
+#elif (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_HOST_OSID)
+/* Firmware heaps reside in the Host driver's non-secure IPA space. */
+#define FW_HEAP_OSID (DRIVER_OSID_START_OFFSET)
+#elif (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID)
+/* Firmware heaps reside in the IPA space as the Firmware. */
+#define FW_HEAP_OSID (FW_OSID)
+#else
+#error "RGX_FW_HEAP_OSID_ASSIGNMENT not configured correctly."
+#endif
+
+#if (RGX_FW_HEAP_OSID_ASSIGNMENT == RGX_FW_HEAP_USES_FIRMWARE_OSID)
+/* The Firmware accesses its private code & data and the interface
+ * memory it shares with the KM drivers using the same MMU context */
+#define MMU_CONTEXT_MAPPING_FWIF MMU_CONTEXT_MAPPING_FWPRIV
+#else
+/* The Firmware accesses the interface memory it shares
+ * with the KM drivers using a reserved MMU context */
+#define MMU_CONTEXT_MAPPING_FWIF (7U)
+#endif
+
+/* Mapping between driver IDs and hardware OSIDs.
+ * DRIVER_ID(osid)/OSID(did) convert in each direction; OSID_SECURE(did)
+ * allocates secure OSIDs downwards from (RGXFW_MAX_NUM_OSIDS - 1): the
+ * nested conditionals compute the number of secure-capable drivers that
+ * precede `did` (sum of DRIVERn_SECURITY_SUPPORT for n < did).
+ * NOTE(review): macro parameters `osid`/`did` are expanded unparenthesized
+ * and multiple times - callers must pass simple, side-effect-free arguments
+ * (CERT PRE01-C). */
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_TRUSTED_DEVICE)
+/* virtualization with security support */
+#define DRIVER_ID(osid) (osid - DRIVER_OSID_START_OFFSET)
+#define OSID(did) (did + DRIVER_OSID_START_OFFSET)
+#define OSID_SECURE(did) (RGXFW_MAX_NUM_OSIDS - 1 - \
+ (did==0 ? (0) : \
+ (did==1 ? (DRIVER0_SECURITY_SUPPORT) : \
+ (did==2 ? (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT) : \
+ (did==3 ? (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT) : \
+ (did==4 ? (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT) : \
+ (did==5 ? (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT + DRIVER4_SECURITY_SUPPORT) : \
+ (did==6 ? (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT + DRIVER4_SECURITY_SUPPORT + DRIVER5_SECURITY_SUPPORT) : \
+ (DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT + DRIVER4_SECURITY_SUPPORT + DRIVER5_SECURITY_SUPPORT + DRIVER6_SECURITY_SUPPORT)))))))))
+
+/* Ensure regular + secure OSIDs for all supported drivers fit in the HW */
+static_assert((RGX_NUM_DRIVERS_SUPPORTED + DRIVER_OSID_START_OFFSET +
+ DRIVER0_SECURITY_SUPPORT + DRIVER1_SECURITY_SUPPORT + DRIVER2_SECURITY_SUPPORT + DRIVER3_SECURITY_SUPPORT +
+ DRIVER4_SECURITY_SUPPORT + DRIVER5_SECURITY_SUPPORT + DRIVER6_SECURITY_SUPPORT + DRIVER7_SECURITY_SUPPORT) <= RGXFW_MAX_NUM_OSIDS,
+ "The GPU hardware is not equipped with enough hardware OSIDs to satisfy the requirements.");
+
+#elif (RGX_NUM_DRIVERS_SUPPORTED > 1)
+/* virtualization without security support */
+#define DRIVER_ID(osid) (osid - DRIVER_OSID_START_OFFSET)
+#define OSID(did) (did + DRIVER_OSID_START_OFFSET)
+
+#elif defined(SUPPORT_TRUSTED_DEVICE)
+/* native with security support: keep reverse compatibility with existing scheme */
+#define DRIVER_ID(osid) (0U)
+#define OSID_SECURE(did) (1U)
+#define OSID(did) (2U)
+#else
+/* native without security support */
+#define DRIVER_ID(osid) (0U)
+#define OSID(did) (did)
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_TRUSTED_DEVICE) */
+
+/* Iteration helpers over driver IDs.
+ * FOREACH_SUPPORTED_DRIVER(did) visits every supported driver ID;
+ * FOREACH_ACTIVE_DRIVER must always be closed with END_FOREACH_ACTIVE_DRIVER
+ * (which supplies the matching closing braces). */
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+
+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++)
+
+#if defined(__KERNEL__)
+/* Driver implementation: iterate supported drivers, skipping any whose
+ * mirrored runtime state is not ACTIVE */
+#define FOREACH_ACTIVE_DRIVER(devinfo, did) FOREACH_SUPPORTED_DRIVER(did) \
+ { \
+ if (devinfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[did].bfOsState != RGXFW_CONNECTION_FW_ACTIVE) continue;
+
+#define END_FOREACH_ACTIVE_DRIVER }
+
+#else
+/* Firmware implementation: walk the table of currently active driver IDs.
+ * Note END_FOREACH_ACTIVE_DRIVER includes the statement terminator here. */
+#define FOREACH_ACTIVE_DRIVER(did) do { \
+ unsigned int idx; \
+ for ((idx)=RGXFW_HOST_DRIVER_ID, (did)=gsRGXFWCtl.aui32ActiveDrivers[0U]; \
+ (idx) < RGXFW_NUM_ACTIVE_DRIVERS; \
+ ++(idx), (did)=gsRGXFWCtl.aui32ActiveDrivers[(idx)]) {
+
+#define END_FOREACH_ACTIVE_DRIVER }} while (false);
+#endif /* defined(__KERNEL__) */
+
+
+#else
+/* Single-driver build: the loops degenerate to visiting the host driver only */
+#define FOREACH_SUPPORTED_DRIVER(did) for ((did)=RGXFW_HOST_DRIVER_ID; (did) <= RGXFW_HOST_DRIVER_ID; (did)++)
+
+#define FOREACH_ACTIVE_DRIVER(did) FOREACH_SUPPORTED_DRIVER(did)
+#define END_FOREACH_ACTIVE_DRIVER
+
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
+
+#define FOREACH_VALIDATION_OSID(osid) for ((osid)=0; (osid) < GPUVIRT_VALIDATION_NUM_OS; (osid)++)
+#define FOREACH_HW_OSID(osid) for ((osid)=0; (osid) < RGXFW_MAX_NUM_OSIDS; (osid)++)
+#define FOREACH_DRIVER_RAW_HEAP(did) for ((did)=RGX_FIRST_RAW_HEAP_DRIVER_ID; (did) < RGX_NUM_DRIVERS_SUPPORTED; (did)++)
+
+#endif /* RGX_FWIF_KM_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures shared by both host client
+ and host server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_FWIF_SHARED_H)
+#define RGX_FWIF_SHARED_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_common.h"
+#include "powervr/mem_types.h"
+#include "devicemem_typedefs.h"
+
+/* Indicates the number of RTDATAs per RTDATASET */
+#if defined(SUPPORT_AGP)
+#if defined(SUPPORT_AGP4)
+#define RGXMKIF_NUM_RTDATAS 4U
+#define RGXMKIF_NUM_GEOMDATAS 4U
+#define RGXMKIF_NUM_RTDATA_FREELISTS 20U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
+#define RGX_NUM_GEOM_CORES (4U)
+#else
+#define RGXMKIF_NUM_RTDATAS 4U
+#define RGXMKIF_NUM_GEOMDATAS 4U
+#define RGXMKIF_NUM_RTDATA_FREELISTS 12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
+#define RGX_NUM_GEOM_CORES (2U)
+#endif
+#else
+#define RGXMKIF_NUM_RTDATAS 2U
+#define RGXMKIF_NUM_GEOMDATAS 1U
+#define RGXMKIF_NUM_RTDATA_FREELISTS 2U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ /* NOTE(review): the stated formula gives 4 here (2 RTDATAs x 2 freelists) but the value is 2U - comment looks stale, confirm */
+#define RGX_NUM_GEOM_CORES (1U)
+#endif
+
+/* Maximum number of UFOs in a CCB command.
+ * The number is based on having 32 sync prims (as originally), plus 32 sync
+ * checkpoints.
+ * Once the use of sync prims is no longer supported, we will retain
+ * the same total (64) as the number of sync checkpoints which may be
+ * supporting a fence is not visible to the client driver and has to
+ * allow for the number of different timelines involved in fence merges.
+ */
+#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U)
+
+/*
+ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER)
+ * command passed through the bridge.
+ * Just across the bridge in the server, any incoming kick command size is
+ * checked against this maximum limit.
+ * In case the incoming command size is larger than the specified limit,
+ * the bridge call is retired with error.
+ */
+#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
+
+/*! 32-bit device virtual address as seen by the Firmware */
+typedef struct RGXFWIF_DEV_VIRTADDR_
+{
+ IMG_UINT32 ui32Addr; /*!< Firmware-visible virtual address */
+} RGXFWIF_DEV_VIRTADDR;
+
+/*! Pairs a full device virtual address with its Firmware-visible alias */
+typedef struct
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; /*!< Device virtual address */
+ RGXFWIF_DEV_VIRTADDR pbyFWAddr; /*!< Firmware address of the same memory */
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+/*! Byte element type of a client circular command buffer (CCCB) */
+typedef IMG_UINT8 RGXFWIF_CCCB;
+
+/* Firmware addresses of UFOs and cleanup-control objects */
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ * @InGroup ClientCCBTypes
+ * @Brief Command data for fence & update types Client CCB commands.
+ */
+typedef struct
+{
+ PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */
+ IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */
+} RGXFWIF_UFO;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief Track pending and completed workloads of HWRTDATA and ZSBUFFER
+ */
+typedef struct
+{
+ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */
+ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+/* Partial-render (PR) buffer indices; these are the values taken by
+ * RGXFWIF_PRBUFFER_TYPE and index CMDTA3D_SHARED::asPRBuffer */
+#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0)
+#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0)
+#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1)
+#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2)
+
+typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE;
+
+/*! Backing state of an on-demand PR buffer */
+typedef enum
+{
+ RGXFWIF_PRBUFFER_UNBACKED = 0, /*!< No physical backing */
+ RGXFWIF_PRBUFFER_BACKED, /*!< Physical backing present */
+ RGXFWIF_PRBUFFER_BACKING_PENDING, /*!< Backing requested, not yet complete */
+ RGXFWIF_PRBUFFER_UNBACKING_PENDING, /*!< Unbacking requested, not yet complete */
+}RGXFWIF_PRBUFFER_STATE;
+
+/*!
+ * @InGroup RenderTarget
+ * @Brief OnDemand Z/S/MSAA Buffers
+ */
+typedef struct
+{
+ IMG_UINT32 ui32BufferID; /*!< Buffer ID*/
+ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */
+ RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */
+ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */
+ IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */
+} UNCACHED_ALIGN RGXFWIF_PRBUFFER;
+
+/*
+ * Used to share frame numbers across UM-KM-FW,
+ * frame number is set in UM,
+ * frame number is required in both KM for HTB and FW for FW trace.
+ *
+ * May be used to house Kick flags in the future.
+ */
+typedef struct
+{
+ IMG_UINT32 ui32FrameNum; /*!< associated frame number */
+} CMD_COMMON;
+
+/*
+ * TA and 3D commands require set of firmware addresses that are stored in the
+ * Kernel. Client has handle(s) to Kernel containers storing these addresses,
+ * instead of raw addresses. We have to patch/write these addresses in KM to
+ * prevent UM from controlling FW addresses directly.
+ * Typedefs for TA and 3D commands are shared between Client and Firmware (both
+ * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use
+ * TA|3D CMD type definitions directly. Therefore we have a SHARED block that
+ * is shared between UM-KM-FW across all BVNC configurations.
+ */
+/* Leading, BVNC-independent portion of TA/3D kick commands */
+typedef struct
+{
+ CMD_COMMON sCmn; /*!< Common command attributes */
+ RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command,
+ this is used for context selection and for storing out HW-context,
+ when TA is switched out for continuing later */
+
+ RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */
+
+} CMDTA3D_SHARED;
+
+/*!
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ * Roff Doff Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ * < runnable commands >< !ready to run >
+ *
+ * Cmd 1 : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+typedef struct
+{
+ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This
+ * must be aligned to 16 bytes. */
+ IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB.
+ Points to the command that is
+ runnable on GPU, if R!=W */
+ IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset.
+ Points to commands not ready, i.e.
+ fence dependencies are not met. */
+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity
+ in bytes of the CCB-1 */
+#if defined(SUPPORT_AGP)
+ /* Additional firmware read offsets used when extra geometry streams are
+ * supported (AGP/AGP4); semantics mirror ui32ReadOffset */
+ IMG_UINT32 ui32ReadOffset2;
+#if defined(SUPPORT_AGP4)
+ IMG_UINT32 ui32ReadOffset3;
+ IMG_UINT32 ui32ReadOffset4;
+#endif
+#endif
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
+
+
+/*! Freelist kind; takes the RGXFW_*_FREELIST values below */
+typedef IMG_UINT32 RGXFW_FREELIST_TYPE;
+
+#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) /*!< Per-context local freelist */
+#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) /*!< Shared global freelist */
+/* With AGP one extra global freelist exists; with AGP4 there are three more.
+ * RGXFW_MAX_FREELISTS sizes per-RTData freelist arrays accordingly. */
+#if defined(SUPPORT_AGP4)
+#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2)
+#define RGXFW_GLOBAL3_FREELIST IMG_UINT32_C(3)
+#define RGXFW_GLOBAL4_FREELIST IMG_UINT32_C(4)
+#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL4_FREELIST + 1U)
+#elif defined(SUPPORT_AGP)
+#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2)
+#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL2_FREELIST + 1U)
+#else
+#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U)
+#endif
+#define RGXFW_MAX_HWFREELISTS (2U)
+
+/*!
+ * @Defgroup ContextSwitching Context switching data interface
+ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality
+ * @{
+ */
+
+/*!
+ * @Brief GEOM DM or TA register controls for context switch
+ */
+typedef struct
+{
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STATE_BASE_ADDR;
+ IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */
+
+ /* Per-task DCE context store/resume controls; field names mirror the
+ * corresponding hardware register names */
+ struct
+ {
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM0;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM1;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM2;
+
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM0;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM1;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM2;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_XFB;
+
+ /* VDM resume state update controls */
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM0;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM1;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM2;
+
+
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM0;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM1;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM2;
+ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_XFB;
+ } asTAState[2]; /*!< Two buffered register sets */
+
+} RGXFWIF_TAREGISTERS_CSWITCH;
+/*! @} End of Defgroup ContextSwitching */
+
+/*! @Brief 3D DM register controls for context switch */
+typedef struct
+{
+ IMG_UINT64 u3DReg_IPP_CONTEXT_ADDR;
+} RGXFWIF_3DREGISTERS_CSWITCH;
+
+/*! @Brief CDM (compute) register controls for context switch */
+typedef struct
+{
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0;
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1;
+ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS;
+ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1;
+
+ /* CDM resume controls */
+ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0;
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B;
+ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B;
+
+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH)
+ /* Secure variants of the context/resume controls, plus the secure
+ * state-restore buffers and context state base address */
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_SECURE_PDS0;
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_SECURE_PDS0_B;
+
+ IMG_UINT64 uCDMReg_CDM_RESUME_SECURE_PDS0;
+ IMG_UINT64 uCDMReg_CDM_RESUME_SECURE_PDS0_B;
+
+ IMG_DEV_VIRTADDR uCDMReg_CDM_CONTEXT_sSRBuffer;
+ IMG_DEV_VIRTADDR uCDMReg_CDM_CONTEXT_sSRBuffer_B;
+
+ IMG_UINT64 uCDMReg_CDM_CONTEXT_STATE_BASE_ADDR;
+#endif
+} RGXFWIF_CDM_REGISTERS_CSWITCH;
+
+/* Size must be 8-byte aligned as the structure crosses the FW interface */
+static_assert((sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) % 8U) == 0U,
+ "the size of the structure must be multiple of 8");
+
+#define RGXFWIF_CDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH)
+
+/*!
+ * @InGroup ContextSwitching
+ * @Brief Render context static register controls for context switch
+ */
+typedef struct
+{
+ RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES]; /*!< Per-geometry-core TA registers for ctx switch */
+ RGXFWIF_3DREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_3DRegs; /*!< 3D registers for ctx switch */
+} RGXFWIF_STATIC_RENDERCONTEXT_STATE;
+
+#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE)
+
+/*! @Brief Compute context static register controls for context switch */
+typedef struct
+{
+ RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */
+} RGXFWIF_STATIC_COMPUTECONTEXT_STATE;
+
+#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE)
+
+/*! @Brief RDM (ray) register controls for context switch */
+typedef struct
+{
+ IMG_UINT64 uRDMReg_RDM_CONTEXT_STATE_BASE_ADDR;
+} RGXFWIF_RDM_REGISTERS_CSWITCH;
+
+/* Size must be 8-byte aligned as the structure crosses the FW interface */
+static_assert((sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH) % 8U) == 0U,
+ "the size of the structure must be multiple of 8");
+
+#define RGXFWIF_RDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH)
+
+/*! @Brief Ray context static register controls for context switch */
+typedef struct
+{
+ RGXFWIF_RDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< RDM registers for ctx switch */
+} RGXFWIF_STATIC_RAYCONTEXT_STATE;
+
+#define RGXFWIF_STATIC_RAYCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RAYCONTEXT_STATE)
+
+/*!
+ @Brief Context reset reason. Last reset reason for a reset context.
+ Reported to the host via RGX_CONTEXT_RESET_REASON_DATA.
+*/
+typedef enum
+{
+ RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */
+ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */
+ RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */
+ RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */
+ RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */
+ RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */
+ RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */
+ RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */
+ RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */
+ RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */
+ RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */
+ RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */
+ RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */
+ RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */
+ RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */
+ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */
+ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */
+} RGX_CONTEXT_RESET_REASON;
+
+/*!
+ @Brief Context reset data shared with the host
+*/
+typedef struct
+{
+ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */
+ IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */
+} RGX_CONTEXT_RESET_REASON_DATA;
+
+/* Reserved regions at the start of the PDS, USC and General device-memory
+ * heaps. Each heap reserves a UM-visible region; the General heap adds a
+ * KM-only region (non-zero only with SUPPORT_TRUSTED_DEVICE) placed
+ * immediately after the UM region. */
+#define RGX_HEAP_UM_PDS_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_PDS_RESERVED_REGION_OFFSET 0
+#define RGX_HEAP_PDS_RESERVED_TOTAL_SIZE RGX_HEAP_UM_PDS_RESERVED_SIZE
+
+#define RGX_HEAP_UM_USC_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_USC_RESERVED_REGION_OFFSET 0
+#define RGX_HEAP_USC_RESERVED_TOTAL_SIZE RGX_HEAP_UM_USC_RESERVED_SIZE
+
+#define RGX_HEAP_UM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#define RGX_HEAP_UM_GENERAL_RESERVED_REGION_OFFSET 0
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY
+#else
+#define RGX_HEAP_KM_GENERAL_RESERVED_SIZE 0
+#endif
+/* KM region starts where the UM region ends */
+#define RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET RGX_HEAP_UM_GENERAL_RESERVED_SIZE
+
+#define RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE (RGX_HEAP_UM_GENERAL_RESERVED_SIZE + RGX_HEAP_KM_GENERAL_RESERVED_SIZE)
+
+/*
+ * 4 dwords reserved for shared register management.
+ * The first dword is the number of shared register blocks to reload.
+ * Should be a multiple of 4 dwords, size in bytes.
+ */
+#define RGX_LLS_SHARED_REGS_RESERVE_SIZE (16U)
+
+#endif /* RGX_FWIF_SHARED_H */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX heap definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGX_HEAPS_H)
+#define RGX_HEAPS_H
+
+/*
+ Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */
+#define RGX_USCCODE_BPH_HEAP_IDENT "BP Handler USC Code" /*!< RGX USC Code for breakpoint handlers Heap Identifier */
+#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX vulkan capture replay buffer Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Compute Signals Heap Identifier */
+#define RGX_COMPONENT_CTRL_HEAP_IDENT "Component Control" /*!< RGX DCE Component Control Heap Identifier */
+#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */
+#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */
+#define RGX_PDS_INDIRECT_STATE_HEAP_IDENT "PDS Indirect State" /*!< PDS Indirect State Table Heap Identifier */
+#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */
+#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */
+#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */
+#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */
+
+#endif /* RGX_HEAPS_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX HWPerf and Debug Types and Defines Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common data types definitions for hardware performance API
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_H_
+#define RGX_HWPERF_H_
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at
+ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/******************************************************************************
+ * Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common.h"
+#include "rgx_hwperf_common.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+
+#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER)
+/* HWPerf interface assumption checks */
+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U,
+ "Cluster count too large for HWPerf protocol definition");
+#endif
+
+/*! Perf counter control words */
+#define RGX_HWPERF_CTRL_NOP (0) /*!< only update HW counters */
+#define RGX_HWPERF_CTRL_STATE_UPDATE_EN (1U << 31) /*!< persistent state update; see other flags below */
+#define RGX_HWPERF_CTRL_GEOM_FULLRANGE (1U) /*!< selectable geom and 3D counters are full range */
+#define RGX_HWPERF_CTRL_COMP_FULLRANGE (2U) /*!< selectable compute counters are full range */
+#define RGX_HWPERF_CTRL_TDM_FULLRANGE (4U) /*!< selectable TDM counters are full range */
+
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/*! All the Data Masters HWPerf is aware of. When a new DM is added to this
+ * list, it should be appended at the end to maintain backward compatibility
+ * of HWPerf data.
+ */
+typedef enum {
+
+ RGX_HWPERF_DM_GP,
+ RGX_HWPERF_DM_TDM,
+ RGX_HWPERF_DM_GEOM,
+ RGX_HWPERF_DM_3D,
+ RGX_HWPERF_DM_CDM,
+ RGX_HWPERF_DM_RTU,
+
+ RGX_HWPERF_DM_LAST,
+
+ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/*! Enum containing bit position for 32bit feature flags used in hwperf and api */
+typedef enum {
+ RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x0001,
+ RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x0002,
+ RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x0004,
+ RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x0008,
+ RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x0010,
+ RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x0020,
+ RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x0040,
+ RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x0080,
+ RGX_HWPERF_FEATURE_MULTICORE_FLAG = 0x0100,
+ RGX_HWPERF_FEATURE_RAYTRACING_FLAG = 0x0200,
+ RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG = 0x0400,
+ RGX_HWPERF_FEATURE_VOLCANIC_FLAG = 0x0800,
+ RGX_HWPERF_FEATURE_ROGUE_FLAG = 0x1000,
+ RGX_HWPERF_FEATURE_RESERVED1_FLAG = 0x2000
+} RGX_HWPERF_FEATURE_FLAGS;
+
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */
+ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */
+ IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */
+ IMG_UINT32 ui32Padding; /*!< Reserved */
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+/*! This structure holds the data of a hardware packet, including counters. */
+typedef struct
+{
+ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */
+ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */
+ IMG_UINT32 ui32PID; /*!< Process identifier */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */
+ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */
+ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */
+ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */
+ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */
+ IMG_UINT32 ui32CtxPriority; /*!< Context priority */
+ IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */
+ IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */
+ IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */
+ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */
+ IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment (not written in the packet) */
+} RGX_HWPERF_HW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream);
+
+/*! Mask for use with the aui32CountBlksStream field when decoding the
+ * counter block ID and mask word. */
+#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U
+#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U
+
+/*! MAX value used in server handling of counter config arrays */
+#define RGX_CNTBLK_COUNTERS_MAX PVRSRV_HWPERF_COUNTERS_PERBLK
+
+
+/*! Obtains the counter block ID word from an aui32CountBlksStream field.
+ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit
+ * within group (3-0) */
+#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+
+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words of
+ * a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)])
+
+/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */
+#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT))
+
+#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK)))
+
+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words
+ * of a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)])
+
+/*! Context switch packet event */
+typedef struct
+{
+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */
+ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */
+ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */
+} RGX_HWPERF_CSW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
+
+/*! Enumeration of clocks supporting this event */
+typedef enum
+{
+ RGX_HWPERF_CLKS_CHG_INVALID = 0,
+
+ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
+
+ RGX_HWPERF_CLKS_CHG_LAST,
+} RGX_HWPERF_CLKS_CHG_NAME;
+
+/*! This structure holds the data of a clocks change packet. */
+typedef struct
+{
+ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */
+ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */
+ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */
+ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
+ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
+ correlated to OSTimeStamp */
+} RGX_HWPERF_CLKS_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
+
+/*! Enumeration of GPU utilisation states supported by this event */
+typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
+
+/*! This structure holds the data of a GPU utilisation state change packet. */
+typedef struct
+{
+ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */
+ IMG_UINT32 uiUnused1; /*!< Padding */
+ IMG_UINT32 uiUnused2; /*!< Padding */
+ IMG_UINT32 uiUnused3; /*!< Padding */
+} RGX_HWPERF_GPU_STATE_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
+
+
+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
+#define HWPERF_PWR_EST_V1_SIG 0x48504531
+
+/*! Macros to obtain a component field from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
+#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28)
+/*!< Obtains the GPU ID from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24)
+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF)
+
+#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31)
+#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28)
+#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U)
+#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24)
+#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU)
+#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU)
+
+/*! This macro constructs a counter ID for a power estimate data stream from
+ * the component parts of: high word flag, unit id, GPU id, counter number */
+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \
+ ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<RGX_HWPERF_PWR_EST_HIGH_OFFSET) | \
+ ((IMG_UINT32) ((IMG_UINT32)(_unit)&RGX_HWPERF_PWR_EST_UNIT_MASK)<<RGX_HWPERF_PWR_EST_UNIT_OFFSET) | \
+ ((IMG_UINT32) ((IMG_UINT32)(_core)&RGX_HWPERF_PWR_EST_GPUID_MASK)<<RGX_HWPERF_PWR_EST_GPUID_OFFSET) | \
+ ((_number)&RGX_HWPERF_PWR_EST_VALUE_MASK)))
+
+/*! This structure holds the data for a power estimate packet. */
+typedef struct
+{
+ IMG_UINT32 ui32StreamVersion; /*!< Version word, HWPERF_PWR_EST_V1_SIG */
+ IMG_UINT32 ui32StreamSize; /*!< Size of array in bytes of stream data
+ held in the aui32StreamData member */
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */
+ IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */
+} RGX_HWPERF_PWR_EST_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA);
+
+/*! Enumeration of the kinds of power change events that can occur */
+typedef enum
+{
+ RGX_HWPERF_PWR_UNDEFINED = 0,
+ RGX_HWPERF_PWR_ON = 1, /*!< Whole device powered on */
+ RGX_HWPERF_PWR_OFF = 2, /*!< Whole device powered off */
+ RGX_HWPERF_PWR_UP = 3, /*!< Power turned on to a HW domain */
+ RGX_HWPERF_PWR_DOWN = 4, /*!< Power turned off to a HW domain */
+ RGX_HWPERF_PWR_SAFETY_RESET = 5, /*!< Resetting the GPU HW units for safety reasons */
+ RGX_HWPERF_PWR_PHR_FULL = 6, /*!< Periodic HW full GPU Reset */
+ RGX_HWPERF_PWR_RD_UP = 7, /*!< Power turned on to a Rascal+Dust */
+ RGX_HWPERF_PWR_RD_DOWN = 8, /*!< Power turned off to a Rascal+Dust */
+ RGX_HWPERF_PWR_SPU_UP = 9, /*!< Power turned on to a SPU */
+ RGX_HWPERF_PWR_SPU_DOWN = 10, /*!< Power turned off to a SPU */
+ RGX_HWPERF_PWR_CLUSTER_UP = 11, /*!< Power turned on to a cluster */
+ RGX_HWPERF_PWR_CLUSTER_DOWN = 12, /*!< Power turned off to a cluster */
+ RGX_HWPERF_PWR_RAC_UP = 13, /*!< Power turned on to a RAC */
+ RGX_HWPERF_PWR_RAC_DOWN = 14, /*!< Power turned off to a RAC */
+
+ RGX_HWPERF_PWR_LAST,
+} RGX_HWPERF_PWR;
+
+/*! This structure holds the data of a power packet. */
+typedef struct
+{
+ RGX_HWPERF_PWR eChange; /*!< Defines the type of power change */
+ IMG_UINT32 ui32Domains; /*!< HW Domains affected */
+ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
+ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
+ correlated to OSTimeStamp */
+ IMG_UINT32 ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time
+ the two timers were correlated */
+ IMG_UINT32 ui32Unused1; /*!< Padding */
+} RGX_HWPERF_PWR_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA);
+
+
+/*
+ * PDVFS, GPU clock frequency changes and workload estimation profiling
+ * data.
+ */
+/*! DVFS and work estimation events. */
+typedef enum
+{
+ RGX_HWPERF_DVFS_EV_INVALID, /*!< Invalid value. */
+ RGX_HWPERF_DVFS_EV_PROACTIVE_EST_START, /*!< Proactive DVFS estimate start */
+ RGX_HWPERF_DVFS_EV_PROACTIVE_EST_FINISHED, /*!< Proactive DVFS estimate finished */
+ RGX_HWPERF_DVFS_EV_REACTIVE_EST_START, /*!< Reactive DVFS estimate start */
+ RGX_HWPERF_DVFS_EV_REACTIVE_EST_FINISHED, /*!< Reactive DVFS estimate finished */
+ /* workload estimation */
+ RGX_HWPERF_DVFS_EV_WORK_EST_START, /*!< Workload estimation start */
+ RGX_HWPERF_DVFS_EV_WORK_EST_FINISHED, /*!< Workload estimation finished */
+ RGX_HWPERF_DVFS_EV_FREQ_CHG, /*!< DVFS OPP/clock frequency change */
+
+ RGX_HWPERF_DVFS_EV_LAST /*!< Number of elements. */
+} RGX_HWPERF_DVFS_EV;
+
+/*! Enumeration of DVFS transitions that can occur */
+typedef enum
+{
+ RGX_HWPERF_DVFS_OPP_NONE = 0x0, /*!< No OPP change, already operating at required freq */
+#if defined(SUPPORT_PDVFS_IDLE)
+ RGX_HWPERF_DVFS_OPP_IDLE = 0x1, /*!< GPU is idle, defer the OPP change */
+#endif
+ /* 0x2 to 0xF reserved */
+ RGX_HWPERF_DVFS_OPP_UPDATE = 0x10, /*!< OPP change, new point is encoded in bits [3:0] */
+ RGX_HWPERF_DVFS_OPP_LAST = 0x20,
+} RGX_HWPERF_DVFS_OPP;
+
+typedef union
+{
+ /*! This structure holds the data of a proactive DVFS calculation packet. */
+ struct
+ {
+ IMG_UINT64 ui64DeadlineInus; /*!< Next deadline in microseconds */
+ IMG_UINT32 ui32Frequency; /*!< Required freq to meet deadline at 90% utilisation */
+ IMG_UINT32 ui32WorkloadCycles; /*!< Current workload estimate in cycles */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ } sProDVFSCalc;
+
+ /*! This structure holds the data of a reactive DVFS calculation packet. */
+ struct
+ {
+ IMG_UINT32 ui32Frequency; /*!< Required freq to achieve average 90% utilisation */
+ IMG_UINT32 ui32Utilisation; /*!< GPU utilisation since last update */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ } sDVFSCalc;
+
+ /*! This structure holds the data of a work estimation packet. */
+ struct
+ {
+ IMG_UINT32 ui32CyclesPrediction; /*!< Predicted cycle count for this workload */
+ IMG_UINT32 ui32CyclesTaken; /*!< Actual cycle count for this workload */
+ RGXFWIF_DM eDM; /*!< Target DM */
+ IMG_UINT32 ui32ReturnDataIndex; /*!< Index into workload estimation table */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ } sWorkEst;
+
+ /*! This structure holds the data of an OPP clock frequency transition packet. */
+ struct
+ {
+ IMG_UINT32 ui32OPPData; /*!< OPP transition */
+ } sOPP;
+
+} RGX_HWPERF_DVFS_DETAIL;
+
+/*! DVFS sub-event data structure */
+typedef struct {
+ RGX_HWPERF_DVFS_EV eEventType; /*!< DVFS sub-event type */
+ RGX_HWPERF_DVFS_DETAIL uData; /*!< DVFS sub-event data */
+} RGX_HWPERF_DVFS_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_DVFS_DATA);
+
+
+/*! Firmware Activity event. */
+typedef enum
+{
+ RGX_HWPERF_FWACT_EV_INVALID, /*!< Invalid value. */
+ RGX_HWPERF_FWACT_EV_REGS_SET, /*!< Registers set. */
+ RGX_HWPERF_FWACT_EV_HWR_DETECTED, /*!< HWR detected. */
+ RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*!< Reset required. */
+ RGX_HWPERF_FWACT_EV_HWR_RECOVERED, /*!< HWR recovered. */
+ RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*!< Freelist ready. */
+ RGX_HWPERF_FWACT_EV_FEATURES, /*!< Features present */
+ RGX_HWPERF_FWACT_EV_FILTER_SET, /*!< Event filter set. */
+ RGX_HWPERF_FWACT_EV_KICK_CANCEL, /*!< A previous pipelined kick cancel. */
+
+ RGX_HWPERF_FWACT_EV_LAST /*!< Number of elements. */
+} RGX_HWPERF_FWACT_EV;
+
+/*! Cause of the HWR event. */
+typedef enum
+{
+ RGX_HWPERF_HWR_REASON_INVALID, /*!< Invalid value. */
+ RGX_HWPERF_HWR_REASON_LOCKUP, /*!< Lockup. */
+ RGX_HWPERF_HWR_REASON_PAGEFAULT, /*!< Page fault. */
+ RGX_HWPERF_HWR_REASON_POLLFAIL, /*!< Poll fail. */
+ RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN, /*!< Deadline overrun. */
+ RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*!< Hard Context Switch deadline overrun. */
+
+ RGX_HWPERF_HWR_REASON_LAST /*!< Number of elements. */
+} RGX_HWPERF_HWR_REASON;
+
+
+/* Fixed size for BVNC string so it does not alter packet data format
+ * Check it is large enough against official BVNC string length maximum
+ */
+#define RGX_HWPERF_MAX_BVNC_LEN (24)
+static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
+ "Space inside HWPerf packet data for BVNC string insufficient");
+
+#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U)
+
+/*! BVNC Features */
+typedef struct
+{
+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+ IMG_UINT16 ui16BlockID;
+
+ /*! Number of counters in this block type */
+ IMG_UINT16 ui16NumCounters;
+
+ /*! Number of blocks of this type */
+ IMG_UINT16 ui16NumBlocks;
+
+ /*! Reserved for future use */
+ IMG_UINT16 ui16Reserved;
+} RGX_HWPERF_BVNC_BLOCK;
+
+/*! BVNC Features */
+typedef struct
+{
+ IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */
+ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */
+ IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */
+ IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */
+ RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */
+} RGX_HWPERF_BVNC;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC);
+
+/*! Performance Counter Configuration data element. */
+typedef struct
+{
+ IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */
+ IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */
+ IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */
+} RGX_HWPERF_COUNTER_CFG_DATA_EL;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL);
+
+/*! Performance Counter Configuration data. */
+typedef struct
+{
+ IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */
+ RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */
+ IMG_UINT32 ui32Padding; /*!< reserved */
+} RGX_HWPERF_COUNTER_CFG;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG);
+
+/*! Sub-event's data. */
+typedef union
+{
+ struct
+ {
+ RGX_HWPERF_DM eDM; /*!< Data Master ID. */
+ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */
+ IMG_UINT32 ui32DMContext; /*!< FW render context */
+ } sHWR; /*!< HWR sub-event data. */
+
+ RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */
+ struct
+ {
+ IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */
+ IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */
+ } sEvMsk; /*!< HW Filter Mask */
+ RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */
+
+ struct
+ {
+ RGX_HWPERF_DM eDM; /*!< Data Master ID. */
+ IMG_UINT32 ui32DMContext; /*!< FW context */
+ IMG_UINT32 ui32CoreMask; /*!< Multicore mask. */
+ IMG_UINT32 ui32KickID; /*!< Kick Id cancelled. */
+ } sKickCancel; /*!< Kick cancel sub-event data. */
+} RGX_HWPERF_FWACT_DETAIL;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL);
+
+/*! This structure holds the data of a FW activity event packet */
+typedef struct
+{
+ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */
+ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */
+ IMG_UINT32 ui32Padding; /*!< Reserved. */
+} RGX_HWPERF_FWACT_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA);
+
+
+typedef enum {
+ RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */
+ RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */
+ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */
+ RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */
+ RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */
+ RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */
+
+ RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */
+ IMG_UINT32 ui32Value; /*!< Value of the UFO object */
+ } sCheckSuccess;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */
+ IMG_UINT32 ui32Value; /*!< Value of the UFO object */
+ IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */
+ } sCheckFail;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */
+ IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */
+ IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */
+ } sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds the packet payload data for UFO event. */
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */
+ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data
+ at the time the packet was generated.
+ Used to approximate Host timestamps for
+ these events. */
+ IMG_UINT32 ui32PID; /*!< Client process identifier */
+ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX
+ API to track submitted work (for
+ debugging/trace purposes) */
+ IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track
+ submitted work (for debugging / trace
+ purposes) */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the
+ stream and stream data offset in the
+ payload */
+ RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32Padding; /*!< Unused, reserved */
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
+
+
+/*!
+ * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent
+ * between KICK_START / KICK_END inclusively for all event types.
+ */
+typedef enum
+{
+ RGX_HWPERF_KICK_TYPE_RESERVED_0, /*!< Replaced by separate TA and 3D types (Deprecated) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_1, /*!< Compute Data Master Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_CDM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_2, /*!< Ray Store Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_RS) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_3, /*!< Scene Hierarchy Generator Kick (Deprecated) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_4, /*!< TQ 2D Data Master Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_TQTDM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_5, /*!< Sync Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_SYNC) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_6, /*!< TA Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_GEOM) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_7, /*!< 3D Kick (Deprecated - Use RGX_HWPERF_KICK_TYPE2_3D) */
+ RGX_HWPERF_KICK_TYPE_RESERVED_8,
+
+ RGX_HWPERF_KICK_TYPE_UNIFIED = 0x10,
+
+ RGX_HWPERF_KICK_TYPE2_TQ2D, /*!< 2D TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_TQ3D, /*!< 3D TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_TQTDM, /*!< 2D Data Master TQ Kick */
+ RGX_HWPERF_KICK_TYPE2_CDM, /*!< Compute Kick */
+ RGX_HWPERF_KICK_TYPE2_GEOM, /*!< GEOM Kick */
+ RGX_HWPERF_KICK_TYPE2_3D, /*!< 3D Kick */
+ RGX_HWPERF_KICK_TYPE2_SYNC, /*!< Sync Kick */
+ RGX_HWPERF_KICK_TYPE2_RS, /*!< Ray Store Kick */
+ RGX_HWPERF_KICK_TYPE2_LAST,
+
+ RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+ RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for
+ scheduling on GPU hardware.
+ See RGX_HWPERF_KICK_TYPE */
+ IMG_UINT32 ui32PID; /*!< Client process identifier */
+ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API
+ to track submitted work (for debugging /
+ trace purposes) */
+ IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted
+ work (for debugging / trace purposes) */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32Padding; /*!< Unused, reserved */
+ IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */
+ IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */
+ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */
+ IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */
+ PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */
+ PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */
+ PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */
+
+ /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */
+ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and
+ stream data offset in the payload */
+#ifdef __CHECKER__
+ /* Since we're not conforming to the C99 standard by not using a flexible
+ * array member need to add a special case for Smatch static code analyser. */
+ IMG_UINT32 aui32StreamData[];
+#else
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ /*!< Series of tuples holding UFO objects data */
+
+ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */
+#endif
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*!
+ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been
+ * Allocated, Freed or Modified. The values are used to determine which event
+ * data structure to use to decode the data from the event stream
+ */
+typedef enum
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED,
+ /*!< Timeline resource packets are
+ now emitted in client hwperf buffer */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */
+
+ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+ /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer
+ * generated in the HOST stream. Timeline data is now provided in the
+ * CLIENT stream instead.
+ */
+ struct
+ {
+ IMG_UINT32 uiPid; /*!< Identifier of owning process */
+ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
+ } sTimelineAlloc;
+
+ /*! Data for TYPE_FENCE_PVR */
+ struct
+ {
+ IMG_PID uiPID; /*!< Identifier of owning process */
+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */
+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point
+ backing this fence on the GPU */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ } sFenceAlloc;
+
+ /*! Data for TYPE_SYNC_CP */
+ struct
+ {
+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */
+ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */
+ IMG_PID uiPID; /*!< Identifier of owning process */
+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ } sSyncCheckPointAlloc;
+
+ /*! Data for TYPE_FENCE_SW */
+ struct
+ {
+ IMG_PID uiPID; /*!< Identifier of owning process */
+ PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */
+ PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */
+ IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ } sSWFenceAlloc;
+
+ /*! Data for TYPE_SYNC */
+ struct
+ {
+ IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ } sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+ /*!< This describes the type of the resource
+ allocated in the driver. See
+ RGX_HWPERF_HOST_RESOURCE_TYPE */
+ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail;
+ /*!< Union of structures providing further
+ data regarding the resource allocated.
+ Size of data varies with union member that
+ is present, check ``ui32AllocType`` value
+ to decode */
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+ /*! Data for TYPE_TIMELINE (*Deprecated*) */
+ struct
+ {
+ IMG_UINT32 uiPid; /*!< Identifier of owning process */
+ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */
+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
+ } sTimelineDestroy;
+
+ /*! Data for TYPE_FENCE_PVR */
+ struct
+ {
+ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */
+ IMG_UINT32 ui32Padding; /*!< Reserved. */
+ } sFenceDestroy;
+
+ /*! Data for TYPE_SYNC_CP */
+ struct
+ {
+ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */
+ } sSyncCheckPointFree;
+
+ /*! Data for TYPE_SYNC */
+ struct
+ {
+ IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */
+ } sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+ /*!< This describes the type of the resource
+ freed or released by the driver. See
+ RGX_HWPERF_HOST_RESOURCE_TYPE */
+ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+ /*!< Union of structures providing further data
+ regarding the resource freed. Size of data
+ varies with union member that is present,
+ check ``ui32FreeType`` value to decode */
+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of
+ the time domains correlation table */
+ IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the
+ time domains correlation table */
+ IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of
+ the time domains correlation table */
+ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+ /*! Data for TYPE_FENCE_PVR */
+ struct
+ {
+ IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence
+ resource that has been created */
+ IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */
+ IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier for the second input
+ fence resource being merged */
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ /*!< Label or name given to the sync resource */
+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
+ } sFenceMerge;
+} RGX_HWPERF_HOST_MODIFY_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
+ /*!< Describes the type of the resource
+ modified by the driver. See
+ RGX_HWPERF_HOST_RESOURCE_TYPE */
+
+ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
+ /*!< Union of structures providing further
+ data regarding the resource modified.
+ Size of data varies with union member that
+ is present.
+ Check ``ui32ModifyType`` value to decode */
+} RGX_HWPERF_HOST_MODIFY_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING,/*!< Device not responding to requests */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */
+
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS;
+
+typedef enum
+{
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */
+
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST
+} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON;
+
+/*! Data for device status event */
+typedef struct
+{
+ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
+ /*!< Device's health status */
+ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
+ /*!< Reason for device's health status */
+} RGX_HWPERF_HOST_DEVICE_HEALTH;
+
+/*! RGX_HWPERF_DEV_INFO_EV values */
+typedef enum
+{
+ RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */
+ RGX_HWPERF_DEV_INFO_EV_FEATURES, /*!< Features sub-event */
+
+ RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */
+} RGX_HWPERF_DEV_INFO_EV;
+
+/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing
+ * further data regarding the device's status
+ */
+typedef union
+{
+ RGX_HWPERF_HOST_DEVICE_HEALTH sDeviceStatus; /*!< Device health status */
+ RGX_HWPERF_BVNC sBVNC; /*!< Device features */
+} RGX_HWPERF_HOST_DEV_INFO_DETAIL;
+
+/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */
+typedef struct
+{
+ IMG_UINT32 ui32Padding;
+ /*!< Reserved. Align structure size to 8 bytes */
+ RGX_HWPERF_DEV_INFO_EV eEvType;
+ /*!< Type of the sub-event. See
+ RGX_HWPERF_DEV_INFO_EV */
+ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail;
+ /*!< Union of structures providing further data
+ regarding the device's status. Size of data
+ varies with union member that is present,
+ check ``eEvType`` value to decode */
+} RGX_HWPERF_HOST_DEV_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */
+typedef enum
+{
+ RGX_HWPERF_INFO_EV_RESERVED_0,
+ RGX_HWPERF_INFO_EV_MEM64_USAGE, /*!< 64-bit Memory usage event */
+ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */
+} RGX_HWPERF_INFO_EV;
+
+/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the
+ * RGX_HWPERF_HOST_INFO_DATA event.
+ */
+typedef union
+{
+ /*! Host Memory usage statistics */
+ struct
+ {
+ IMG_UINT64 ui64TotalMemoryUsage; /*!< Total memory usage (bytes) */
+ /*! Detailed memory usage */
+ struct
+ {
+ IMG_UINT32 ui32Pid; /*!< Process ID */
+ IMG_UINT32 ui32Padding; /*!< Padding */
+ IMG_UINT64 ui64KernelMemUsage; /*!< Kernel memory usage (bytes) */
+ IMG_UINT64 ui64GraphicsMemUsage; /*!< GPU memory usage (bytes) */
+ } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS];
+ } sMemUsageStats;
+} RGX_HWPERF_HOST_INFO_DETAIL;
+
+/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device
+ * memory usage information.
+ */
+typedef struct
+{
+ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
+ RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */
+ RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail;
+ /*!< Union of structures providing further data
+ regarding memory usage. Size varies with union
+ member that is present, check ``eEvType``
+ value to decode */
+} RGX_HWPERF_HOST_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! FENCE_WAIT_TYPE definitions */
+typedef enum
+{
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */
+
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE;
+
+/*! FENCE_WAIT_RESULT definitions */
+typedef enum
+{
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */
+
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT;
+
+/*! FENCE_WAIT_DETAIL Event Payload */
+typedef union
+{
+/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */
+ struct
+ {
+ IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */
+ } sBegin;
+
+ /*! Data for SYNC_FENCE_WAIT_TYPE_END */
+ struct
+ {
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */
+ } sEnd;
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL;
+
+/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure
+ * is received whenever the host driver handles a wait for sync event request.
+ */
+typedef struct
+{
+ IMG_PID uiPID; /*!< Identifier of the owning process */
+ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType;
+ /*!< Type of the subevent, see
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail;
+ /*!< Union of structures providing further data
+ regarding device's status. Size of data varies with
+ union member that is present, check ``eType`` value
+ to decode */
+
+} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA.
+ * Software Timeline Advanced Event Payload. This data structure is received
+ * whenever the host driver processes a Software Timeline Advanced event.
+ */
+typedef struct
+{
+ IMG_PID uiPID; /*!< Identifier of the owning process */
+ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */
+ IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the
+ timeline has advanced */
+
+} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */
+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */
+
+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */
+} RGX_HWPERF_HOST_CLIENT_INFO_TYPE;
+
+typedef struct
+{
+ IMG_PID uiClientPID; /*!< Client process identifier */
+ IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */
+ IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */
+} RGX_HWPERF_HOST_CLIENT_PROC_NAME;
+
+/*! Number of bytes occupied by an RGX_HWPERF_HOST_CLIENT_PROC_NAME record
+ * whose trailing ``acName`` string is ``ui32NameLen`` bytes long: the fixed
+ * header fields (up to the flexible ``acName`` member) plus the string. */
+#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \
+ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen)))
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */
+ RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ } sProcName;
+} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL;
+
+typedef struct
+{
+ IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */
+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType;
+ /*!< Type of the subevent, see
+ RGX_HWPERF_HOST_CLIENT_INFO_TYPE */
+ RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail;
+ /*!< Union of structures. Size of data
+ varies with union member that is present,
+ check ``eType`` value to decode */
+
+} RGX_HWPERF_HOST_CLIENT_INFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
+ "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE,
+ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER,
+ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS,
+ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA,
+
+ RGX_HWPERF_RESOURCE_TYPE_COUNT
+} RGX_HWPERF_RESOURCE_CAPTURE_TYPE;
+
+typedef struct
+{
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32BPP;
+ IMG_UINT32 ui32PixFormat;
+} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO;
+
+typedef struct
+{
+ IMG_INT32 i32XOffset; /*!< render surface X shift */
+ IMG_INT32 i32YOffset; /*!< render surface Y shift */
+ IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */
+ IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */
+} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO;
+
+typedef union
+{
+ struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES
+ {
+ IMG_UINT32 ui32RenderSurfaceCount;
+ RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ } sRenderSurfaces;
+
+ struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS
+ {
+ RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ } sTLTBuffers;
+} RGX_RESOURCE_CAPTURE_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType;
+ IMG_PID uPID;
+ IMG_UINT32 ui32ContextID;
+ IMG_UINT32 ui32FrameNum;
+ IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */
+ IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */
+ RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */
+} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO;
+
+#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail)
+
+/*! Tile Lifetime Tracking header size. Only available if
+ * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via
+ * SUPPORT_TLT_PERF
+ */
+#define RGX_TLT_HARDWARE_HDR_SIZE (16U)
+
+/* PVRSRVGetHWPerfResourceCaptureResult */
+typedef enum
+{
+ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0,
+ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */
+ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */
+ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */
+ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */
+} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS;
+
+typedef struct
+{
+ IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */
+ IMG_UINT32 ui32CtxID;
+ RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when
+ unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
+ IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
+} RGX_RESOURCE_CAPTURE_RESULT;
+
+/*! This type is a union of packet payload data structures associated with
+ * various FW and Host events */
+typedef union
+{
+ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data,
+ events ``0x01-0x06`` */
+ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data,
+ events ``0x07-0x19``, ``0x28-0x29``
+ See RGX_HWPERF_HW_DATA */
+ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet
+ data, events ``0x1A`` */
+ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state
+ change event packet data,
+ events ``0x1B`` */
+ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event
+ packet data,
+ events ``0x20-0x22`` */
+ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data,
+ events ``0x23`` */
+ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data,
+ events ``0x30-0x31`` */
+ RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data,
+ events ``0x32`` */
+ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */
+ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event
+ packet data,
+ events ``0x39`` */
+ /* */
+ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data,
+ events ``0x01`` (Host) */
+ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data,
+ events ``0x02`` (Host) */
+ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data,
+ events ``0x03`` (Host) */
+ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data,
+ events ``0x04`` (Host) */
+ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data,
+ events ``0x05`` (Host) */
+ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data,
+ events ``0x06`` (Host) */
+ RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data,
+ events ``0x07`` (Host) */
+ RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data,
+ events ``0x08`` (Host) */
+ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data,
+ events ``0x09`` (Host) */
+ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance
+ data, events ``0x0A`` (Host) */
+ RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info,
+ events ``0x0B`` (Host) */
+} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA);
+
+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))))
+
+#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \
+ ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType))))
+
+/******************************************************************************
+ * API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with counters.
+ * Directly addressable blocks must have a value between 0..15 [0..0xF].
+ * Indirect groups have following encoding:
+ * First hex digit (LSB) represents a unit number within the group
+ * and the second hex digit represents the group number.
+ * Group 0 is the direct group, all others are indirect groups.
+ */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID;
+
+/*! Directly addressable non bank-switched counter blocks */
+#define RGX_CNTBLK_ID_JONES 0x0000U
+#define RGX_CNTBLK_ID_SLC 0x0001U /*!< SLC-specific counter control */
+#define RGX_CNTBLK_ID_FBCDC 0x0002U
+#define RGX_CNTBLK_ID_FW_CUSTOM 0x0003U /*!< Custom FW provided counters */
+
+/*! Directly addressable SLC counter blocks - presence depends on GPU. */
+#define RGX_CNTBLK_ID_SLCBANK0 0x0004U /*!< SLCBANK0 counter control */
+#define RGX_CNTBLK_ID_SLCBANK1 0x0005U /*!< SLCBANK1 counter control */
+#define RGX_CNTBLK_ID_SLCBANK2 0x0006U /*!< SLCBANK2 counter control */
+#define RGX_CNTBLK_ID_SLCBANK3 0x0007U /*!< SLCBANK3 counter control */
+#define RGX_CNTBLK_ID_SLCBANK_ALL 0x4004U /*!< SLC ALL block ID */
+
+#define RGX_CNTBLK_ID_DIRECT_LAST 0x0008U /*!< Indirect blocks start from here */
+
+/*! Indirectly addressable counter blocks */
+#define RGX_CNTBLK_ID_ISP0 0x0010U /*!< ISP 1..N ISP */
+#define RGX_CNTBLK_ID_ISP1 0x0011U
+#define RGX_CNTBLK_ID_ISP2 0x0012U
+#define RGX_CNTBLK_ID_ISP3 0x0013U
+#define RGX_CNTBLK_ID_ISP4 0x0014U
+#define RGX_CNTBLK_ID_ISP5 0x0015U
+#define RGX_CNTBLK_ID_ISP6 0x0016U
+#define RGX_CNTBLK_ID_ISP7 0x0017U
+#define RGX_CNTBLK_ID_ISP_ALL 0x4010U
+
+#define RGX_CNTBLK_ID_MERCER0 0x0020U /*!< MERCER 1..N MERCER */
+#define RGX_CNTBLK_ID_MERCER1 0x0021U
+#define RGX_CNTBLK_ID_MERCER2 0x0022U
+#define RGX_CNTBLK_ID_MERCER3 0x0023U
+#define RGX_CNTBLK_ID_MERCER4 0x0024U
+#define RGX_CNTBLK_ID_MERCER5 0x0025U
+#define RGX_CNTBLK_ID_MERCER6 0x0026U
+#define RGX_CNTBLK_ID_MERCER7 0x0027U
+#define RGX_CNTBLK_ID_MERCER_ALL 0x4020U
+
+#define RGX_CNTBLK_ID_PBE0 0x0030U /*!< PBE 1..N PBE_PER_SPU x N SPU */
+#define RGX_CNTBLK_ID_PBE1 0x0031U
+#define RGX_CNTBLK_ID_PBE2 0x0032U
+#define RGX_CNTBLK_ID_PBE3 0x0033U
+#define RGX_CNTBLK_ID_PBE4 0x0034U
+#define RGX_CNTBLK_ID_PBE5 0x0035U
+#define RGX_CNTBLK_ID_PBE6 0x0036U
+#define RGX_CNTBLK_ID_PBE7 0x0037U
+#define RGX_CNTBLK_ID_PBE_ALL 0x4030U
+
+#define RGX_CNTBLK_ID_PBE_SHARED0 0x0040U /*!< PBE_SHARED 1..N SPU */
+#define RGX_CNTBLK_ID_PBE_SHARED1 0x0041U
+#define RGX_CNTBLK_ID_PBE_SHARED2 0x0042U
+#define RGX_CNTBLK_ID_PBE_SHARED3 0x0043U
+#define RGX_CNTBLK_ID_PBE_SHARED_ALL 0x4040U
+
+#define RGX_CNTBLK_ID_USC0 0x0050U /*!< USC 1..N USC */
+#define RGX_CNTBLK_ID_USC1 0x0051U
+#define RGX_CNTBLK_ID_USC2 0x0052U
+#define RGX_CNTBLK_ID_USC3 0x0053U
+#define RGX_CNTBLK_ID_USC4 0x0054U
+#define RGX_CNTBLK_ID_USC5 0x0055U
+#define RGX_CNTBLK_ID_USC6 0x0056U
+#define RGX_CNTBLK_ID_USC7 0x0057U
+#define RGX_CNTBLK_ID_USC_ALL 0x4050U
+
+#define RGX_CNTBLK_ID_TPU0 0x0060U /*!< TPU 1..N TPU */
+#define RGX_CNTBLK_ID_TPU1 0x0061U
+#define RGX_CNTBLK_ID_TPU2 0x0062U
+#define RGX_CNTBLK_ID_TPU3 0x0063U
+#define RGX_CNTBLK_ID_TPU4 0x0064U
+#define RGX_CNTBLK_ID_TPU5 0x0065U
+#define RGX_CNTBLK_ID_TPU6 0x0066U
+#define RGX_CNTBLK_ID_TPU7 0x0067U
+#define RGX_CNTBLK_ID_TPU_ALL 0x4060U
+
+#define RGX_CNTBLK_ID_SWIFT0 0x0070U /*!< SWIFT 1..N SWIFT */
+#define RGX_CNTBLK_ID_SWIFT1 0x0071U
+#define RGX_CNTBLK_ID_SWIFT2 0x0072U
+#define RGX_CNTBLK_ID_SWIFT3 0x0073U
+#define RGX_CNTBLK_ID_SWIFT4 0x0074U
+#define RGX_CNTBLK_ID_SWIFT5 0x0075U
+#define RGX_CNTBLK_ID_SWIFT6 0x0076U
+#define RGX_CNTBLK_ID_SWIFT7 0x0077U
+#define RGX_CNTBLK_ID_SWIFT_ALL 0x4070U
+
+#define RGX_CNTBLK_ID_TEXAS0 0x0080U /*!< TEXAS 1..N TEXAS */
+#define RGX_CNTBLK_ID_TEXAS1 0x0081U
+#define RGX_CNTBLK_ID_TEXAS2 0x0082U
+#define RGX_CNTBLK_ID_TEXAS3 0x0083U
+#define RGX_CNTBLK_ID_TEXAS_ALL 0x4080U
+
+#define RGX_CNTBLK_ID_RAC0 0x0090U /*!< RAC 1..N RAC */
+#define RGX_CNTBLK_ID_RAC1 0x0091U
+#define RGX_CNTBLK_ID_RAC2 0x0092U
+#define RGX_CNTBLK_ID_RAC3 0x0093U
+#define RGX_CNTBLK_ID_RAC_ALL 0x4090U
+
+#define RGX_CNTBLK_ID_LAST 0x0094U /*!< End of RAC block */
+
+/*! Masks for the counter block ID*/
+#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) /*!< Unit within group */
+#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) /*!< Group value */
+#define RGX_CNTBLK_ID_GROUP_SHIFT (4U)
+#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) /*!< GPU ID for MC use */
+#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U)
+#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) /*!< Program all units within a group */
+
+static_assert(
+ ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN),
+ "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient");
+
+#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (e))
+
+/* When adding new counters here, make sure changes are made to rgxfw_hwperf_fwblk_valid() as well */
+#define RGX_CUSTOM_FW_CNTRS \
+ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ \
+ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ \
+ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
+ \
+ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
+ \
+ X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK))
+
+/*! Counter IDs for the firmware held statistics */
+typedef enum
+{
+#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id,
+ RGX_CUSTOM_FW_CNTRS
+#undef X
+
+ /* always the last entry in the list */
+ RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
+
+/*! Identifier for each counter in a performance counting module */
+typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */
+#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)b1)
+#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e))
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_DEFAULT RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG)
+#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * @par
+ * All FW Start/End/Debug (SED) events. */
+#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+/*! All FW events. */
+#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\
+ RGX_HWPERF_EVENT_MASK_FW_UFO |\
+ RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+/*! HW Periodic events (1ms interval). */
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+/*! All HW Kick/Finish events. */
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+ RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
+
+
+/*! Type used in the RGX API RGXConfigHWPerfCounters() */
+typedef struct
+{
+ /*! Reserved for future use */
+ IMG_UINT32 ui32Reserved;
+
+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+ IMG_UINT16 ui16BlockID;
+
+ /*! Number of configured counters within this block */
+ IMG_UINT16 ui16NumCounters;
+
+ /*! Counter register values */
+ IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX];
+
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title HWPerf counter table header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally for HWPerf data retrieval
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGX_HWPERF_TABLE_H
+#define RGX_HWPERF_TABLE_H
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions to check dynamic power state of
+ * counter block instance. Used only in firmware. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+ RGX_HWPERF_CNTBLK_ID eBlkType,
+ IMG_UINT8 ui8UnitId);
+
+/* Counter block run-time info */
+typedef struct
+{
+ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+
+/* Function pointer type for functions to check block is valid and present
+ * on that RGX Device at runtime. It may have compile logic or run-time
+ * logic depending on where the code executes: server, srvinit or firmware.
+ * Values in the psRtInfo output parameter are only valid if true returned.
+ */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+ const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+ const void *pvDev_km,
+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo);
+
+/* This structure encodes properties of a type of performance counter block.
+ * The structure is sometimes referred to as a block type descriptor. These
+ * properties contained in this structure represent the columns in the block
+ * type model table variable below. These values vary depending on the build
+ * BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor.
+ */
+struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+ IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */
+ IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect register value to select indirect unit */
+ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core (compile time use) */
+ const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+ IMG_UINT16 *pszBlkCfgValid; /* Array of supported counters per block type */
+ /* NOTE(review): despite the 'psz' prefix, pszBlkCfgValid points to an
+ * array of IMG_UINT16 counter indices, not a NUL-terminated string. */
+};
+
+/*****************************************************************************/
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+/* firmware context */
+# define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+/* host client/server context */
+# define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in gasCntBlkTypeModel[] table below and only called from
+ RGX_FIRMWARE run-time context. Therefore compile time configuration is used.
+ *****************************************************************************/
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+# include "rgxfw_pow.h"
+# include "rgxfw_utils.h"
+
+/* Returns IMG_TRUE when the given direct (non-indirect) counter block is
+ * powered. Only called from the RGX_FIRMWARE run-time context, so
+ * compile-time feature configuration is used. ui8UnitId is unused here.
+ */
+static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId);
+static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+ switch (eBlkType)
+ {
+ /* Always-powered direct blocks. */
+ case RGX_CNTBLK_ID_JONES:
+ case RGX_CNTBLK_ID_SLC:
+ case RGX_CNTBLK_ID_SLCBANK0:
+ case RGX_CNTBLK_ID_FBCDC:
+ case RGX_CNTBLK_ID_FW_CUSTOM:
+ return IMG_TRUE;
+
+#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
+ /* Extra SLC banks exist (and are powered) only when the core has
+ * enough memory buses: bank 1 needs more than one bus, banks 2 and 3
+ * need more than two. */
+ case RGX_CNTBLK_ID_SLCBANK1:
+ if (RGX_FEATURE_NUM_MEMBUS > 1U)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+
+ case RGX_CNTBLK_ID_SLCBANK2:
+ case RGX_CNTBLK_ID_SLCBANK3:
+ if (RGX_FEATURE_NUM_MEMBUS > 2U)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
+
+ default:
+ return IMG_FALSE;
+ }
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+/* Returns IMG_TRUE when the given indirect counter block is powered.
+ * Only called from the RGX_FIRMWARE run-time context.
+ *
+ * TPU0 is special-cased as always powered: no Dusts are enabled until the
+ * first DC opens the GPU, which would otherwise make setting up the PDump
+ * HWPerf trace buffers very difficult. All other indirect blocks are only
+ * powered when the rasterisation domain is on and at least one Dust is
+ * enabled. ui8UnitId is unused here.
+ */
+static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId);
+static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ /* Declaration kept ahead of any statement for C90-mode builds. */
+ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units();
+ IMG_BOOL bPowered;
+
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+ switch (eBlkType)
+ {
+ case RGX_CNTBLK_ID_TPU0:
+ /* Always-on workaround, see block comment above. */
+ bPowered = IMG_TRUE;
+ break;
+
+ default:
+ if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) &&
+ (ui32NumDustsEnabled > 0U))
+ {
+ bPowered = IMG_TRUE;
+ }
+ else
+ {
+ bPowered = IMG_FALSE;
+ }
+ break;
+ }
+ return bPowered;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct ((void *)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void *)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+ Therefore each function has two implementations, one for compile time and one
+ run time configuration depending on the context. The functions will inform the
+ caller whether this block is valid for this particular RGX device. Other
+ run-time dependent data is returned in psRtInfo for the caller to use.
+ *****************************************************************************/
+
+
+/* Used for all block types: Direct and Indirect.
+ *
+ * Reports whether the counter block described by psBlkTypeDesc is present
+ * on this RGX device and how many instances exist (psRtInfo->uiNumUnits).
+ * Returns IMG_TRUE only when at least one unit is present; psRtInfo
+ * contents are valid only in that case.
+ *
+ * Two implementations are selected by build context:
+ * - __KERNEL__ (Server): run-time query of the device feature values.
+ * - otherwise (Firmware): compile-time BVNC configuration.
+ */
+static inline IMG_BOOL rgx_hwperf_blk_present(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+#if defined(__KERNEL__) /* Server context -- Run-time Only */
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ PVRSRV_DEVICE_NODE *psNode;
+ IMG_UINT32 ui32MaxTPUPerSPU;
+ IMG_UINT32 ui32NumMemBus;
+ IMG_UINT32 ui32RTArchVal;
+
+ DBG_ASSERT(psDevInfo != NULL);
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+
+ if (((psDevInfo == NULL) || (psBlkTypeDesc == NULL)) || (psRtInfo == NULL))
+ {
+ return IMG_FALSE;
+ }
+
+ psNode = psDevInfo->psDeviceNode;
+ DBG_ASSERT(psNode != NULL);
+
+ if (psNode == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ ui32MaxTPUPerSPU =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, MAX_TPU_PER_SPU);
+
+ /* CATURIX top infrastructure exposes a single memory bus regardless of
+ * the NUM_MEMBUS feature value. */
+ if (PVRSRV_IS_FEATURE_SUPPORTED(psNode, CATURIX_TOP_INFRASTRUCTURE))
+ {
+ ui32NumMemBus = 1U;
+ }
+ else
+ {
+ ui32NumMemBus =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_MEMBUS);
+ }
+
+ ui32RTArchVal =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, RAY_TRACING_ARCH);
+
+ switch (psBlkTypeDesc->uiCntBlkIdBase)
+ {
+ /* Direct blocks: a single instance on all cores. */
+ case RGX_CNTBLK_ID_JONES:
+ case RGX_CNTBLK_ID_SLC:
+ case RGX_CNTBLK_ID_SLCBANK0:
+ case RGX_CNTBLK_ID_FBCDC:
+ case RGX_CNTBLK_ID_FW_CUSTOM:
+ psRtInfo->uiNumUnits = 1;
+ break;
+
+#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
+ /* Extra SLC banks depend on the number of memory buses. */
+ case RGX_CNTBLK_ID_SLCBANK1:
+ if (ui32NumMemBus >= 2U)
+ {
+ psRtInfo->uiNumUnits = 1;
+ }
+ else
+ {
+ psRtInfo->uiNumUnits = 0;
+ }
+ break;
+
+ case RGX_CNTBLK_ID_SLCBANK2:
+ case RGX_CNTBLK_ID_SLCBANK3:
+ if (ui32NumMemBus > 2U)
+ {
+ psRtInfo->uiNumUnits = 1;
+ }
+ else
+ {
+ psRtInfo->uiNumUnits = 0;
+ }
+ break;
+#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
+
+ case RGX_CNTBLK_ID_TPU0:
+ case RGX_CNTBLK_ID_SWIFT0:
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
+ psRtInfo->uiNumUnits *= ui32MaxTPUPerSPU;
+ break;
+
+ case RGX_CNTBLK_ID_TEXAS0:
+ case RGX_CNTBLK_ID_PBE_SHARED0:
+
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
+ break;
+
+ case RGX_CNTBLK_ID_RAC0:
+ /* RAC blocks exist only for ray-tracing arch values above 2;
+ * count the SPUs that have a RAC present. */
+ if (ui32RTArchVal > 2U)
+ {
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU0_RAC_PRESENT) +
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU1_RAC_PRESENT) +
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU2_RAC_PRESENT) +
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, SPU3_RAC_PRESENT);
+ }
+ else
+ {
+ psRtInfo->uiNumUnits = 0;
+ }
+ break;
+
+ case RGX_CNTBLK_ID_USC0:
+ case RGX_CNTBLK_ID_MERCER0:
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_CLUSTERS);
+ break;
+
+ case RGX_CNTBLK_ID_PBE0:
+
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, PBE_PER_SPU);
+ psRtInfo->uiNumUnits *=
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
+ break;
+
+ case RGX_CNTBLK_ID_ISP0:
+
+ psRtInfo->uiNumUnits =
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_ISP_PER_SPU);
+ /* Adjust by NUM_SPU */
+
+ psRtInfo->uiNumUnits *=
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
+ break;
+
+ default:
+ return IMG_FALSE;
+ }
+ /* Verify that we have at least one unit present */
+ if (psRtInfo->uiNumUnits > 0U)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+#else /* FW context -- Compile-time only */
+ IMG_UINT32 ui32NumMemBus;
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+
+ /* Guard psRtInfo as well: it is dereferenced unconditionally below
+ * (the KM path above already validates all three parameters). */
+ if (unlikely((psBlkTypeDesc == NULL) || (psRtInfo == NULL)))
+ {
+ return IMG_FALSE;
+ }
+
+#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
+ ui32NumMemBus = RGX_FEATURE_NUM_MEMBUS;
+#else
+ ui32NumMemBus = 1U;
+#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
+
+ switch (psBlkTypeDesc->uiCntBlkIdBase)
+ {
+ /* Handle the dynamic-sized SLC blocks which are only present if
+ * RGX_FEATURE_NUM_MEMBUS is appropriately set.
+ */
+ case RGX_CNTBLK_ID_SLCBANK1:
+ if (ui32NumMemBus >= 2U)
+ {
+ psRtInfo->uiNumUnits = 1;
+ }
+ else
+ {
+ psRtInfo->uiNumUnits = 0;
+ }
+ break;
+
+ case RGX_CNTBLK_ID_SLCBANK2:
+ case RGX_CNTBLK_ID_SLCBANK3:
+ if (ui32NumMemBus > 2U)
+ {
+ psRtInfo->uiNumUnits = 1;
+ }
+ else
+ {
+ psRtInfo->uiNumUnits = 0;
+ }
+ break;
+
+ default:
+ /* All other blocks use the compile-time unit count from the
+ * block type descriptor. */
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ break;
+ }
+ /* Only report the block when at least one unit is present. */
+ if (psRtInfo->uiNumUnits > 0U)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+#endif /* defined(__KERNEL__) */
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+
+/* Used to instantiate a null row in the block type model table below where the
+ * block is not supported for a given build BVNC in firmware/user mode context.
+ * This is needed as the blockid to block type lookup uses the table as well
+ * and clients may try to access blocks not in the hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) X(_blkid, 0, 0, #_blkid, NULL, NULL, NULL)
+
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where the block is not present on an RGX device in question the
+ pfnIsBlkPresent() returns false, if valid and present it returns true.
+ Columns in the table with a ** indicate the value is a default and that the
+ value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent()
+ should be used at runtime by the caller. These columns are only valid for
+ compile-time BVNC configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+
+ Table contains Xmacro styled entries. Each includer of this file must define
+ a gasCntBlkTypeModel[] structure which is local to itself. Only the layout is
+ defined here.
+
+ uiCntBlkIdBase : Block-ID
+ uiIndirectReg : 0 => Direct, non-zero => INDIRECT register address
+ uiNumUnits : Number of units present on the GPU
+ pszBlockNameComment : Name of the Performance Block
+ pfnIsBlkPowered : Function to determine power state of block
+ pfnIsBlkPresent : Function to determine block presence on the core
+ pszBlkCfgValid : Array of counters valid within this block type
+ *****************************************************************************/
+
+ // Furian 8XT V2 layout:
+
+ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */
+
+ /* RGX_CNTBLK_ID_JONES */
+#if defined(RGX_FIRMWARE) || defined(__KERNEL__)
+
+/* Furian 8XT Direct Performance counter blocks */
+
+#define RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST \
+ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \
+X(RGX_CNTBLK_ID_JONES, 0, 1, "PERF_BLK_JONES", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiJONES), \
+X(RGX_CNTBLK_ID_SLC, 0, 1, "PERF_BLK_SLC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC), \
+X(RGX_CNTBLK_ID_FBCDC, 0, 1, "PERF_BLK_FBCDC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFBCDC), \
+X(RGX_CNTBLK_ID_FW_CUSTOM, 0, 1, "PERF_BLK_FW_CUSTOM", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFWCUSTOM), \
+X(RGX_CNTBLK_ID_SLCBANK0, 0, 1, "PERF_BLK_SLC0", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC0), \
+X(RGX_CNTBLK_ID_SLCBANK1, 0, 1, "PERF_BLK_SLC1", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC1), \
+X(RGX_CNTBLK_ID_SLCBANK2, 0, 1, "PERF_BLK_SLC2", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC2), \
+X(RGX_CNTBLK_ID_SLCBANK3, 0, 1, "PERF_BLK_SLC3", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC3)
+
+/* Furian 8XT Indirect Performance counter blocks */
+
+#if !defined(RGX_CR_RAC_INDIRECT)
+#define RGX_CR_RAC_INDIRECT (0x8398U)
+#endif
+
+#define RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST \
+ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \
+X(RGX_CNTBLK_ID_ISP0, RGX_CR_ISP_INDIRECT, RGX_HWPERF_NUM_SPU * RGX_HWPERF_NUM_ISP_PER_SPU, "PERF_BLK_ISP", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiISP), \
+X(RGX_CNTBLK_ID_MERCER0, RGX_CR_MERCER_INDIRECT, RGX_HWPERF_NUM_MERCER, "PERF_BLK_MERCER", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiMERCER), \
+X(RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_INDIRECT, RGX_HWPERF_NUM_PBE, "PERF_BLK_PBE", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE), \
+X(RGX_CNTBLK_ID_PBE_SHARED0, RGX_CR_PBE_SHARED_INDIRECT, RGX_HWPERF_NUM_PBE_SHARED, "PERF_BLK_PBE_SHARED", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE_SHARED), \
+X(RGX_CNTBLK_ID_USC0, RGX_CR_USC_INDIRECT, RGX_HWPERF_NUM_USC, "PERF_BLK_USC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiUSC), \
+X(RGX_CNTBLK_ID_TPU0, RGX_CR_TPU_INDIRECT, RGX_HWPERF_NUM_TPU, "PERF_BLK_TPU", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTPU), \
+X(RGX_CNTBLK_ID_SWIFT0, RGX_CR_SWIFT_INDIRECT, RGX_HWPERF_NUM_SWIFT, "PERF_BLK_SWIFT", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiSWIFT), \
+X(RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_INDIRECT, RGX_HWPERF_NUM_TEXAS, "PERF_BLK_TEXAS", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTEXAS), \
+X(RGX_CNTBLK_ID_RAC0, RGX_CR_RAC_INDIRECT, RGX_HWPERF_NUM_RAC, "PERF_BLK_RAC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiRAC)
+
+#else /* !defined(RGX_FIRMWARE) && !defined(__KERNEL__) */
+
+#error "RGX_FIRMWARE or __KERNEL__ *MUST* be defined"
+
+#endif /* defined(RGX_FIRMWARE) || defined(__KERNEL__) */
+
+#endif /* RGX_HWPERF_TABLE_H */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX build options
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which provides up to
+ * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and
+ * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option was enabled at compile
+ * time.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
+ * remains backwards compatible.
+ */
+
+#ifndef RGX_OPTIONS_H
+#define RGX_OPTIONS_H
+
+#define OPTIONS_NO_HARDWARE_EN (0x1UL << 0)
+#define OPTIONS_PDUMP_EN (0x1UL << 1)
+#define OPTIONS_SECURE_CONTEXT_SWITCH_EN (0x1UL << 2)
+#define OPTIONS_SECURE_ALLOC_KM_EN (0x1UL << 3)
+#define OPTIONS_RGX_EN (0x1UL << 4)
+#define OPTIONS_SECURE_EXPORT_EN (0x1UL << 5)
+#define OPTIONS_INSECURE_EXPORT_EN (0x1UL << 6)
+#define OPTIONS_VFP_EN (0x1UL << 7)
+#define OPTIONS_WORKLOAD_ESTIMATION_EN (0x1UL << 8)
+#define OPTIONS_PDVFS_EN (0x1UL << 9)
+#define OPTIONS_DEBUG_EN (0x1UL << 10)
+#define OPTIONS_BUFFER_SYNC_EN (0x1UL << 11)
+#define OPTIONS_AUTOVZ_EN (0x1UL << 12)
+#define OPTIONS_AUTOVZ_HW_REGS_EN (0x1UL << 13)
+#define OPTIONS_UNUSED2_EN (0x1UL << 14)
+#define OPTIONS_VALIDATION_EN (0x1UL << 15)
+
+#define OPTIONS_PERCONTEXT_FREELIST_EN (0x1UL << 31)
+
+#define RGX_BUILD_OPTIONS_MASK_KM \
+ (OPTIONS_NO_HARDWARE_EN | \
+ OPTIONS_PDUMP_EN | \
+ OPTIONS_SECURE_CONTEXT_SWITCH_EN | \
+ OPTIONS_SECURE_ALLOC_KM_EN | \
+ OPTIONS_RGX_EN | \
+ OPTIONS_SECURE_EXPORT_EN | \
+ OPTIONS_INSECURE_EXPORT_EN | \
+ OPTIONS_VFP_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN | \
+ OPTIONS_DEBUG_EN | \
+ OPTIONS_BUFFER_SYNC_EN | \
+ OPTIONS_AUTOVZ_EN | \
+ OPTIONS_AUTOVZ_HW_REGS_EN | \
+ OPTIONS_VALIDATION_EN)
+
+#define RGX_BUILD_OPTIONS_MASK_FW \
+ (RGX_BUILD_OPTIONS_MASK_KM & \
+ ~OPTIONS_BUFFER_SYNC_EN)
+
+/* Build options that the FW must have if present on the KM */
+#define FW_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN))
+
+/* Build options that the UM must have if present on the KM */
+#define UM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN))
+
+/* Build options that the KM must have if present on the UM */
+#define KM_OPTIONS_STRICT ((RGX_BUILD_OPTIONS_MASK_KM | \
+ OPTIONS_PERCONTEXT_FREELIST_EN) & \
+ ~(OPTIONS_DEBUG_EN | \
+ OPTIONS_WORKLOAD_ESTIMATION_EN | \
+ OPTIONS_PDVFS_EN | \
+ OPTIONS_BUFFER_SYNC_EN))
+
+#define NO_HARDWARE_OPTION "NO_HARDWARE "
+#if defined(NO_HARDWARE)
+ #define OPTIONS_BIT0 OPTIONS_NO_HARDWARE_EN
+ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT0 0x0UL
+#endif /* NO_HARDWARE */
+
+#define PDUMP_OPTION "PDUMP "
+#if defined(PDUMP)
+ #define OPTIONS_BIT1 OPTIONS_PDUMP_EN
+ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT1 0x0UL
+#endif /* PDUMP */
+
+#define SECURE_CONTEXT_SWITCH_OPTION "SECURE_CONTEXT_SWITCH "
+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH)
+ #define OPTIONS_BIT2 OPTIONS_SECURE_CONTEXT_SWITCH_EN
+ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT2 0x0UL
+#endif /* SUPPORT_SECURE_CONTEXT_SWITCH */
+
+#define SECURE_ALLOC_KM_OPTION "SECURE_ALLOC_KM "
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ #define OPTIONS_BIT3 OPTIONS_SECURE_ALLOC_KM_EN
+ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT3 0x0UL
+#endif /* SUPPORT_SECURE_ALLOC_KM */
+
+#define RGX_OPTION " "
+#if defined(SUPPORT_RGX)
+ #define OPTIONS_BIT4 OPTIONS_RGX_EN
+ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT4 0x0UL
+#endif /* SUPPORT_RGX */
+
+#define SECURE_EXPORT_OPTION "SECURE_EXPORTS "
+#if defined(SUPPORT_SECURE_EXPORT)
+ #define OPTIONS_BIT5 OPTIONS_SECURE_EXPORT_EN
+ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT5 0x0UL
+#endif /* SUPPORT_SECURE_EXPORT */
+
+#define INSECURE_EXPORT_OPTION "INSECURE_EXPORTS "
+#if defined(SUPPORT_INSECURE_EXPORT)
+ #define OPTIONS_BIT6 OPTIONS_INSECURE_EXPORT_EN
+ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT6 0x0UL
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+#define VFP_OPTION "VFP "
+#if defined(SUPPORT_VFP)
+ #define OPTIONS_BIT7 OPTIONS_VFP_EN
+ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT7 0x0UL
+#endif /* SUPPORT_VFP */
+
+#define WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION "
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ #define OPTIONS_BIT8 OPTIONS_WORKLOAD_ESTIMATION_EN
+ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT8 0x0UL
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+
+#define PDVFS_OPTION "PDVFS "
+#if defined(SUPPORT_PDVFS)
+ #define OPTIONS_BIT9 OPTIONS_PDVFS_EN
+ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT9 0x0UL
+#endif /* SUPPORT_PDVFS */
+
+#define DEBUG_OPTION "DEBUG "
+#if defined(DEBUG)
+ #define OPTIONS_BIT10 OPTIONS_DEBUG_EN
+ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT10 0x0UL
+#endif /* DEBUG */
+
+#define BUFFER_SYNC_OPTION "BUFFER_SYNC "
+#if defined(SUPPORT_BUFFER_SYNC)
+ #define OPTIONS_BIT11 OPTIONS_BUFFER_SYNC_EN
+ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT11 0x0UL
+#endif /* SUPPORT_BUFFER_SYNC */
+
+#define AUTOVZ_OPTION "AUTOVZ "
+#if defined(SUPPORT_AUTOVZ)
+ #define OPTIONS_BIT12 OPTIONS_AUTOVZ_EN
+ #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT12 0x0UL
+#endif /* SUPPORT_AUTOVZ */
+
+#define AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS "
+#if defined(SUPPORT_AUTOVZ_HW_REGS)
+ #define OPTIONS_BIT13 OPTIONS_AUTOVZ_HW_REGS_EN
+ #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT13 0x0UL
+#endif /* SUPPORT_AUTOVZ_HW_REGS */
+
+/* UNUSED bit, kept for rogue/volcanic compatibility */
+#define INTERNAL_UNUSED2_OPTION "INTERNAL_UNUSED2 "
+#if defined(INTERNAL_UNUSED2)
+ #define OPTIONS_BIT14 OPTIONS_UNUSED2_EN
+ #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT14 0x0UL
+#endif
+
+#define VALIDATION_OPTION "VALIDATION "
+#if defined(SUPPORT_VALIDATION)
+ #define OPTIONS_BIT15 OPTIONS_VALIDATION_EN
+ #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT15 0x0UL
+#endif /* SUPPORT_VALIDATION */
+
+#define OPTIONS_BIT31 OPTIONS_PERCONTEXT_FREELIST_EN
+#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+#error "Bit exceeds reserved range"
+#endif
+
+#define RGX_BUILD_OPTIONS_KM \
+ (OPTIONS_BIT0 |\
+ OPTIONS_BIT1 |\
+ OPTIONS_BIT2 |\
+ OPTIONS_BIT3 |\
+ OPTIONS_BIT4 |\
+ OPTIONS_BIT5 |\
+ OPTIONS_BIT6 |\
+ OPTIONS_BIT7 |\
+ OPTIONS_BIT8 |\
+ OPTIONS_BIT9 |\
+ OPTIONS_BIT10 |\
+ OPTIONS_BIT11 |\
+ OPTIONS_BIT12 |\
+ OPTIONS_BIT13 |\
+ OPTIONS_BIT14 |\
+ OPTIONS_BIT15)
+
+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
+
+#define RGX_BUILD_OPTIONS_LIST \
+ { \
+ NO_HARDWARE_OPTION, \
+ PDUMP_OPTION, \
+ SECURE_CONTEXT_SWITCH_OPTION, \
+ SECURE_ALLOC_KM_OPTION, \
+ RGX_OPTION, \
+ SECURE_EXPORT_OPTION, \
+ INSECURE_EXPORT_OPTION, \
+ VFP_OPTION, \
+ WORKLOAD_ESTIMATION_OPTION, \
+ PDVFS_OPTION, \
+ DEBUG_OPTION, \
+ BUFFER_SYNC_OPTION, \
+ AUTOVZ_OPTION, \
+ AUTOVZ_HW_REGS_OPTION, \
+ INTERNAL_UNUSED2_OPTION, \
+ VALIDATION_OPTION \
+ }
+
+#endif /* RGX_OPTIONS_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Device virtual memory map
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory heaps device specific configuration
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHEAPCONFIG_H
+#define RGXHEAPCONFIG_H
+
+#include "rgxdefs_km.h"
+
+
+#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000)
+#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000)
+#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000)
+
+#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000)
+#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000)
+#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000)
+#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000)
+#define RGX_HEAP_SIZE_32MiB IMG_UINT64_C(0x0002000000)
+#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000)
+
+#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000)
+#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000)
+#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000)
+#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000)
+#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000)
+#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000)
+#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000)
+#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000)
+#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000)
+
+/*
+ RGX Device Virtual Address Space Definitions
+
+ NOTES:
+ Base addresses have to be a multiple of 4MiB
+
+ This file defines the RGX virtual address heaps that are used in
+ application memory contexts. It also shows where the Firmware memory heap
+ fits into this, but the firmware heap is only ever created in the
+ Services KM/server component.
+
+ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+ respectively. Therefore if clients use multiple configs they must still
+ be consistent with their definitions for these heaps.
+
+ Shared virtual memory (GENERAL_SVM) support requires half of the address
+ space (512 GiB) be reserved for SVM allocations to mirror application CPU
+ addresses.
+
+ The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the
+ general (4KiB) heap and the general non-4K heap. The first 128 GiB is used
+ for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the
+ GENERAL_NON4K_HEAP. This heap has a default page-size of 16K.
+ AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to force it
+ to these values: 4K,64K,256K,1M,2M.
+
+ Heaps must not start at 0x0000000000, as this is reserved for internal
+ use within device memory layer.
+ Range comments, those starting in column 0 below are a section heading of
+ sorts and are above the heaps in that range. Often this is the reserved
+ size of the heap within the range.
+*/
+
+
+/* 0x00_0000_0000 ************************************************************/
+
+/* 0x00_0000_0000 - 0x00_0040_0000 **/
+ /* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
+
+/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
+ /* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
+ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
+ #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
+
+
+/* 0x80_0000_0000 ************************************************************/
+
+/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
+ /* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
+ #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
+ #define RGX_GENERAL_HEAP_SIZE RGX_HEAP_SIZE_128GiB
+
+/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
+ /* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
+
+/* 0xB0_0000_0000 - 0xB3_FFFF_FFFF **/
+ /* 704 GiB to 720 GiB, size of 16 GiB : RESERVED ROGUE **/
+
+/* 0xB4_0000_0000 - 0xB7_FFFF_FFFF **/
+ /* 720 GiB to 736 GiB, size of 16 GiB : FREE **/
+
+/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
+ /* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
+ #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xB800000000)
+ #define RGX_GENERAL_NON4K_HEAP_SIZE RGX_HEAP_SIZE_32GiB
+
+
+/* 0xC0_0000_0000 ************************************************************/
+
+/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
+ /* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
+
+/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
+ /* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
+ #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
+ #define RGX_PDSCODEDATA_HEAP_SIZE RGX_HEAP_SIZE_4GiB
+
+/* 0xDB_0000_0000 - 0xDC_FFFF_FFFF **/
+ /* 876 GiB to 884 GiB, size of 8 GiB : RESERVED ROGUE **/
+
+/* 0xDD_0000_0000 - 0xDF_FFFF_FFFF **/
+ /* 884 GiB to 896 GiB, size of 12 GiB : FREE **/
+
+ /*
+ * The breakpoint handler code heap must share the same 4GiB address
+ * range as the USC shader code heap. The address space split is
+ * configurable.
+ *
+ * The breakpoint handler register is fixed, so the following parts
+ * of the BP handler address are static:
+ * [31:24] = 0xFE for 4064MiB offset after the USC code heap.
+ * [5:0] = 0x00 i.e. aligned to 64 Byte boundary.
+ *
+ * The remaining part of the BP handler is dynamic and encoded in
+ * the USC instruction BABP_target_addr. The BP handler thus
+ * allows a range of 16 MiB with granularity of 64 Bytes.
+ */
+
+/* 0xE0_0000_0000 - 0xE0_FDFF_FFFF **/
+ /* 896 GiB to 900 GiB, size of 4 GiB less 32 MiB : USCCODE_HEAP **/
+ #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
+ #define RGX_USCCODE_HEAP_SIZE (RGX_HEAP_SIZE_4GiB - RGX_HEAP_SIZE_32MiB)
+
+/* 0xE0_FE00_0000 - 0xE0_FEFF_FFFF **/
+ /* 900 GiB less 32 MiB to 900 GiB less 16 MiB, size of 16 MiB : USCCODE_BPH_HEAP **/
+ #define RGX_USCCODE_BPH_HEAP_BASE (IMG_UINT64_C(0xE100000000) - RGX_HEAP_SIZE_32MiB)
+ #define RGX_USCCODE_BPH_HEAP_SIZE RGX_HEAP_SIZE_16MiB
+
+/* 0xE0_FF00_0000 - 0xE0_FFFF_FFFF **/
+ /* 900 GiB less 16 MiB to 900 GiB, size of 16 MiB : RESERVED **/
+
+/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
+ /* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF **/
+ /* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP **/
+
+ /* Firmware heaps defined in rgx_heap_firmware.h as they are not present in
+ application memory contexts, see:
+ RGX_FIRMWARE_RAW_HEAP_BASE
+ RGX_FIRMWARE_RAW_HEAP_SIZE
+ See header for other sub-heaps details
+ */
+
+/* 0xE2_0000_0000 - 0xE2_FFFF_FFFF **/
+ /* 904 GiB to 908 GiB, size of 4GiB : RESERVED ROGUE **/
+
+/* 0xE3_0000_0000 - 0xE3_FFFF_FFFF **/
+ /* 908 GiB to 912 GiB, size of 4 GiB : FREE **/
+
+/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/
+ /* 912 GiB to 928 GiB, size 16 GiB : RESERVED_ROGUE **/
+
+/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/
+ /* 928 GiB to 932 GiB, size of 4 GiB : FREE **/
+
+/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/
+ /* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/
+ #define RGX_VK_CAPT_REPLAY_HEAP_BASE IMG_UINT64_C(0xE900000000)
+ #define RGX_VK_CAPT_REPLAY_HEAP_SIZE RGX_HEAP_SIZE_1GiB
+
+/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/
+ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/
+
+/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/
+ /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/
+ /* CDM Signals heap (31 signals less one reserved for Services).
+ * Size 960B rounded up to minimum heap size */
+ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000)
+ #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE
+
+/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/
+ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/
+
+/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/
+ /* 940 GiB to 944 GiB, size 4 GiB : COMPONENT_CTRL_HEAP **/
+ #define RGX_COMPONENT_CTRL_HEAP_BASE IMG_UINT64_C(0xEB00000000)
+ #define RGX_COMPONENT_CTRL_HEAP_SIZE RGX_HEAP_SIZE_4GiB
+
+/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/
+ /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/
+ #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000)
+ #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB
+
+/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/
+ /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/
+ #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000)
+ #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB
+
+/* 0xEC_8000_0000 - 0xEC_FFFF_FFFF **/
+ /* 946 GiB to 948 GiB, size of 2 GiB : FREE **/
+
+/* 0xED_0000_0000 - 0xED_00FF_FFFF */
+ /* 948 GiB to 949 GiB, size 16 MiB : PDS_INDIRECT_STATE_HEAP */
+ #define RGX_PDS_INDIRECT_STATE_HEAP_BASE IMG_UINT64_C(0xED00000000)
+ #define RGX_PDS_INDIRECT_STATE_HEAP_SIZE RGX_HEAP_SIZE_16MiB
+
+/* 0xED_4000_0000 - 0xED_FFFF_FFFF **/
+ /* 949 GiB to 952 GiB, size of 3 GiB : FREE **/
+
+/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/
+ /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/
+ #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000)
+ #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB
+
+/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/
+ /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/
+ /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/
+ #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000)
+ #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB
+
+/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/
+ /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/
+
+/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/
+ /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */
+ #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000)
+ #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB
+
+/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/
+ /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/
+
+/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/
+ /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/
+ #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000)
+ #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB
+
+/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/
+ /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/
+
+/* 0xF3_0000_0000 - 0xF7_FFFF_FFFF **/
+ /* 972 GiB to 992 GiB, size of 20 GiB : FREE **/
+
+/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/
+ /* 992 GiB to 1000 GiB, size 8 GiB : RESERVED ROGUE **/
+
+/* 0xFA_0000_0000 - 0xFF_FFFF_FFFF **/
+ /* 1000 GiB to 1024 GiB, size of 24 GiB : FREE **/
+
+
+/* 0xFF_FFFF_FFFF ************************************************************/
+
+/* End of RGX Device Virtual Address Space definitions */
+
+#endif /* RGXHEAPCONFIG_H */
+
+/******************************************************************************
+ End of file (rgxheapconfig.h)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TC_CLOCKS_H)
+#define TC_CLOCKS_H
+
+/*
+ * The core clock speed is passed through a multiplier depending on the TC
+ * version.
+ *
+ * On TC_ES1: Multiplier = x3, final speed = 270MHz
+ * On TC_ES2: Multiplier = x6, final speed = 540MHz
+ * On TCF5:   Multiplier = x1, final speed = 45MHz
+ *
+ *
+ * The base (unmultiplied speed) can be adjusted using a module parameter
+ * called "sys_core_clk_speed", a number in Hz.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
+ *
+ * would result in a core speed of 60MHz xMultiplier.
+ *
+ *
+ * The memory clock is unmultiplied and can be adjusted using a module
+ * parameter called "sys_mem_clk_speed", this should be the number in Hz for
+ * the memory clock speed.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the memory clock speed set to 100MHz.
+ *
+ *
+ * Same applies to the system interface clock speed, "sys_sysif_clk_speed".
+ * Needed for TCF5 but not for TC_ES2/ES1.
+ * As an example:
+ *
+ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
+ *
+ * would attempt to start the driver with the system clock speed set to 45MHz.
+ *
+ *
+ * All parameters can be specified at once, e.g.,
+ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
+ */
+
+#define RGX_TC_SYS_CLOCK_SPEED (45000000) /*< Unused */
+
+#if defined(TC_ODIN_27_5_254_2)
+ #define RGX_TC_CORE_CLOCK_SPEED (94000000) /* base speed in Hz; final = base x RGX_TC_CLOCK_MULTIPLEX (see file header) */
+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) /* memory clock in Hz, unmultiplied */
+ #define RGX_TC_CLOCK_MULTIPLEX (16)
+#else
+ /* FPGA tcfvuquad with Odin */
+ #define RGX_TC_CORE_CLOCK_SPEED (50000000) /* NOTE(review): previous note said "3.125MHz", which presumes a /16 view; multiplex below is x1, so effective speed is the 50MHz base — confirm */
+ #define RGX_TC_MEM_CLOCK_SPEED (40000000) /* NOTE(review): previous note said "3.75MHz" — likewise appears stale; confirm */
+ #define RGX_TC_CLOCK_MULTIPLEX (1)
+#endif
+
+#endif /* if !defined(TC_CLOCKS_H) */
--- /dev/null
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_sync_km.h"
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3)
+
+/*******************************************
+ RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hPrivData; /* opaque private-data handle; semantics defined server-side */
+ IMG_UINT32 ui32ContextFlags; /* context creation flags */
+ IMG_UINT32 ui32PackedCCBSizeU88; /* presumably CCB size log2 values packed as U8.8 — confirm against server unpacking */
+} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext; /* handle to the created kick-sync context; passed to the other calls in this header */
+ PVRSRV_ERROR eError; /* bridge call result */
+} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+/*******************************************
+ RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext; /* context handle obtained from RGXCreateKickSyncContext */
+} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ PVRSRV_ERROR eError; /* bridge call result */
+} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+/*******************************************
+ RGXKickSync2
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG
+{
+ IMG_HANDLE hKickSyncContext; /* context handle obtained from RGXCreateKickSyncContext */
+ IMG_UINT32 *pui32UpdateDevVarOffset; /* update dev-var offsets; presumably ui32ClientUpdateCount entries — confirm */
+ IMG_UINT32 *pui32UpdateValue; /* update values; presumably parallel to the offsets array — confirm */
+ IMG_CHAR *puiUpdateFenceName; /* name for the update fence */
+ IMG_HANDLE *phUpdateUFODevVarBlock; /* UFO dev-var block handles for the updates */
+ PVRSRV_FENCE hCheckFenceFD; /* presumably fence to wait on before the kick — confirm */
+ PVRSRV_TIMELINE hTimelineFenceFD; /* presumably timeline the update fence is created on — confirm */
+ IMG_UINT32 ui32ClientUpdateCount; /* number of client updates */
+ IMG_UINT32 ui32ExtJobRef; /* external job reference */
+} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2;
+
+/* Bridge out structure for RGXKickSync2 */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG
+{
+ PVRSRV_ERROR eError; /* bridge call result */
+ PVRSRV_FENCE hUpdateFenceFD; /* presumably fence signalled on kick completion — confirm */
+} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2;
+
+/*******************************************
+ RGXSetKickSyncContextProperty
+ *******************************************/
+
+/* Bridge in structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Input; /* property-specific input value */
+ IMG_HANDLE hKickSyncContext; /* context handle obtained from RGXCreateKickSyncContext */
+ IMG_UINT32 ui32Property; /* identifier of the property to set */
+} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+/* Bridge out structure for RGXSetKickSyncContextProperty */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG
+{
+ IMG_UINT64 ui64Output; /* property-specific output value */
+ PVRSRV_ERROR eError; /* bridge call result */
+} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY;
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
#include "img_types.h"
#include "pvrsrv_error.h"
-static_assert(sizeof(IMG_BOOL) == 4, "invalid size of IMG_BOOL");
+static_assert(sizeof(IMG_BOOL) == 1, "invalid size of IMG_BOOL");
static_assert(sizeof(IMG_INT) == 4, "invalid size of IMG_INT");
static_assert(sizeof(IMG_UINT) == 4, "invalid size of IMG_UINT");
static_assert(sizeof(PVRSRV_ERROR) == 4, "invalid size of PVRSRV_ERROR");
All of these should be configurable only through the 'default' value
******************************************************************************/
#define APPHINT_LIST_BUILDVAR_COMMON \
-/* name, type, class, default, helper, */ \
-X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE ) \
-X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE ) \
-X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE ) \
-X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE ) \
+/* name, type, class, default, helper, guest, */ \
+X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE, ALWAYS ) \
+X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \
+X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \
+X(DevmemHistoryBufSizeLog2, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \
+X(DevmemHistoryMaxEntries, UINT32, ALWAYS, PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES, NO_PARAM_TABLE, ALWAYS )
/*
*******************************************************************************
Module parameters
******************************************************************************/
#define APPHINT_LIST_MODPARAM_COMMON \
-/* name, type, class, default, helper, */ \
-X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE ) \
+/* name, type, class, default, helper, guest, */ \
+X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE, ALWAYS ) \
\
-X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE ) \
-X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE ) \
+X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE, ALWAYS ) \
+X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE, ALWAYS ) \
\
-X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE ) \
-X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE ) \
+X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE, ALWAYS ) \
+X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE, ALWAYS ) \
\
-X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE ) \
-X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE ) \
-X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE ) \
-X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE ) \
+X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE, ALWAYS ) \
+X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE, ALWAYS ) \
\
-X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE ) \
+X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE, ALWAYS ) \
\
-X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE ) \
+X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE, ALWAYS ) \
\
-X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE ) \
+X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE, ALWAYS ) \
\
-X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE ) \
-X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE ) \
-X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE ) \
+X(HWPerfFWBufSizeInKB, UINT32, PDUMP, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE, ALWAYS ) \
\
-X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE ) \
-X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE ) \
-X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE ) \
-X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE ) \
-X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE ) \
-X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE ) \
+X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE, ALWAYS ) \
+X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE, ALWAYS ) \
+X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE, ALWAYS ) \
+X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE, ALWAYS ) \
+X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE, ALWAYS ) \
+X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE, ALWAYS ) \
\
-X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE ) \
-X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE ) \
+X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE, ALWAYS ) \
\
-X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \
-X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \
-X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \
-X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \
-X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE ) \
-X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE ) \
+X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \
+X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \
+X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \
+X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \
+X(TPUTrilinearFracMaskRDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE, ALWAYS ) \
+X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE, ALWAYS ) \
+X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE, ALWAYS ) \
\
-X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE ) \
-X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE ) \
-X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE ) \
+X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE, ALWAYS ) \
+X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE, ALWAYS ) \
\
-X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE ) \
+X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE, ALWAYS ) \
\
-X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE ) \
-X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE ) \
-X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \
-X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_RISCVDMITEST, NO_PARAM_TABLE ) \
-X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE ) \
+X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE, ALWAYS ) \
+X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE, ALWAYS ) \
+X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_RISCVDMITEST, NO_PARAM_TABLE, ALWAYS ) \
+X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE, ALWAYS ) \
\
-X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \
-X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE )
+X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \
+X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(SyncCheckpointPoolMaxLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLMAXLOG2, NO_PARAM_TABLE, ALWAYS ) \
+X(SyncCheckpointPoolInitLog2, UINT32, ALWAYS, PVRSRV_APPHINT_CHECKPOINTPOOLINITLOG2, NO_PARAM_TABLE, ALWAYS ) \
+X(PhysHeapMinMemOnConnection, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(RestrictGpuLocalPhysHeapSizeMB, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS )
+
/*
*******************************************************************************
Debugfs parameters - driver configuration
******************************************************************************/
#define APPHINT_LIST_DEBUGINFO_COMMON \
-/* name, type, class, default, helper, */ \
-X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \
-X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \
-X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE ) \
-X(HWPerfClientFilter_OpenGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL, NO_PARAM_TABLE ) \
-X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE ) \
-X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE ) \
+/* name, type, class, default, helper, guest, */ \
+X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl, ALWAYS ) \
+X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl, ALWAYS ) \
+X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE, NEVER ) \
+X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfClientFilter_OpenGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL, NO_PARAM_TABLE, ALWAYS ) \
+X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE, ALWAYS ) \
+X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE, ALWAYS ) \
/*
*******************************************************************************
Debugfs parameters - device configuration
******************************************************************************/
#define APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \
-/* name, type, class, default, helper, */ \
+/* name, type, class, default, helper, guest, */ \
/* Device Firmware config */\
-X(AssertOnHWRTrigger, BOOL, ALWAYS, APPHNT_BLDVAR_ASSERTONHWRTRIGGER, NO_PARAM_TABLE ) \
-X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE ) \
-X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE ) \
-X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \
-X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \
-X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE ) \
-X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl ) \
-X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE ) \
+X(AssertOnHWRTrigger, BOOL, ALWAYS, APPHNT_BLDVAR_ASSERTONHWRTRIGGER, NO_PARAM_TABLE, ALWAYS ) \
+X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE, ALWAYS ) \
+X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl, ALWAYS ) \
+X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl, ALWAYS ) \
+X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE, ALWAYS ) \
+X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl, ALWAYS ) \
+X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE, ALWAYS ) \
/* Device host config */ \
-X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \
-X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE ) \
-X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE ) \
-X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE ) \
-X(EnableFWPoisonOnFree, BOOL, DEBUG, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE ) \
-X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE ) \
-X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE )
+X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE, ALWAYS ) \
+X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE, ALWAYS ) \
+X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE, ALWAYS ) \
+X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableFWPoisonOnFree, BOOL, DEBUG, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE, ALWAYS ) \
+X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE, ALWAYS ) \
+X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE, ALWAYS )
/*
*******************************************************************************
X(VALIDATION) \
X(GPUVIRT_VAL)
+#define APPHINT_RT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER)
+
/*
*******************************************************************************
Visibility control for module parameters
APPHINT_CLASS_MAX
} APPHINT_CLASS;
+/* Run-time visibility class for apphints, generated from APPHINT_RT_CLASS_LIST
+ * via the X-macro pattern: one APPHINT_RT_CLASS_<name> enumerator per list entry.
+ */
+typedef enum {
+#define X(a) APPHINT_RT_CLASS_ ## a,
+ APPHINT_RT_CLASS_LIST
+#undef X
+ APPHINT_RT_CLASS_MAX /* number of classes; must remain last */
+} APPHINT_RT_CLASS;
#endif /* KM_APPHINT_DEFS_COMMON_H */
#include "opaque_types.h"
#include "pmr_impl.h"
#include "physheap_config.h"
+#include "pvrsrv_device.h"
#ifndef PHYSHEAP_H
#define PHYSHEAP_H
+/* Byte <-> kilobyte/megabyte conversion helpers. The B2KB/B2MB macros
+ * shift right and therefore truncate any sub-KB/MB remainder; KB2B/MB2B
+ * are exact (modulo 64-bit overflow for very large inputs). */
+#define B2KB(x) ((x) >> 10)
+#define B2MB(x) ((x) >> 20)
+
+static inline IMG_UINT64 KB2B(IMG_UINT64 ui64Kilobytes) { return ui64Kilobytes << 10; }
+static inline IMG_UINT64 MB2B(IMG_UINT64 ui64Megabytes) { return ui64Megabytes << 20; }
+
typedef struct _PHYS_HEAP_ PHYS_HEAP;
#define INVALID_PHYS_HEAP 0xDEADDEAD
+typedef IMG_UINT32 PHYS_HEAP_POLICY;
+
+/* Heap has default allocation policy and does not require
+ * any additional OS Functionality. Physically contiguous
+ * allocations are required for this physheap.
+ */
+#define PHYS_HEAP_POLICY_DEFAULT (0U)
+
+/*
+ * Heap has allocation strategy that may produce non
+ * physically contiguous allocations, additional OS functionality
+ * is required to map these allocations into the kernel.
+ */
+#define PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG (1U)
+/* NOTE(review): "POLOCY" below is a typo for "POLICY". The spelling is kept
+ * as-is because other translation units may already reference this name;
+ * a rename should be done tree-wide in a follow-up change. */
+#define PHYS_HEAP_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK (1U)
+
struct _CONNECTION_DATA_;
typedef struct _PG_HANDLE_
@Input psPhysHeap Pointer to Phys Heap.
@Input psConnection Pointer to device connection.
@Input uiSize Allocation size.
-@Input uiChunkSize Chunk size.
@Input ui32NumPhysChunks Physical chunk count.
@Input ui32NumVirtChunks Virtual chunk count.
@Input pui32MappingTable Mapping Table.
typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap,
struct _CONNECTION_DATA_ *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
} PHEAP_IMPL_FUNCS;
/*************************************************************************/ /*!
-@Function PhysHeapCreateDeviceHeapsFromConfigs
-@Description Create new heaps for a device from configs.
-@Input psDevNode Pointer to device node struct
-@Input pasConfigs Pointer to array of Heap configurations.
-@Input ui32NumConfigs Number of configurations in array.
-@Return PVRSRV_ERROR PVRSRV_OK or error code
+@Function PhysHeapInitDeviceHeaps
+@Description Registers and acquires physical memory heaps
+@Input psDeviceNode pointer to device node
+@Input psDevConfig pointer to device config
+@Return PVRSRV_ERROR PVRSRV_OK on success, or a PVRSRV_ error code
*/ /**************************************************************************/
-PVRSRV_ERROR
-PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP_CONFIG *pasConfigs,
- IMG_UINT32 ui32NumConfigs);
+PVRSRV_ERROR PhysHeapInitDeviceHeaps(PPVRSRV_DEVICE_NODE psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*************************************************************************/ /*!
+@Function PhysHeapDeInitDeviceHeaps
+@Description Releases and unregisters physical memory heaps
+@Input psDeviceNode pointer to device node
+@Return None
+*/ /**************************************************************************/
+void PhysHeapDeInitDeviceHeaps(PPVRSRV_DEVICE_NODE psDeviceNode);
/*************************************************************************/ /*!
@Function PhysHeapCreateHeapFromConfig
on heap type.
@Input psDevNode Pointer to device node struct.
@Input psConfig Heap configuration.
-@Output ppsPhysHeap Pointer to the created heap.
+@Output ppsPhysHeap Optional pointer to the created heap. Can be NULL
@Return PVRSRV_ERROR PVRSRV_OK or error code
*/ /**************************************************************************/
PVRSRV_ERROR
Destroy with PhysHeapDestroy when no longer required.
@Input psDevNode Pointer to device node struct
@Input psConfig Heap configuration.
+@Input uiPolicy Phys heap allocation policy.
@Input pvImplData Implementation specific data. Can be NULL.
@Input psImplFuncs Implementation specific function table. Must be
a valid pointer.
-@Output ppsPhysHeap Pointer to the created heap. Must be a valid
- pointer.
+@Output ppsPhysHeap Optional pointer to the created heap. Can be NULL
@Return PVRSRV_ERROR PVRSRV_OK or error code
*/ /**************************************************************************/
PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode,
PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP_POLICY uiPolicy,
PHEAP_IMPL_DATA pvImplData,
PHEAP_IMPL_FUNCS *psImplFuncs,
PHYS_HEAP **ppsPhysHeap);
PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap);
/*************************************************************************/ /*!
-@Function PhysHeapAcquireByUsage
-@Description Acquire PhysHeap by usage flag.
-@Input ui32UsageFlag PhysHeap usage flag
-@Input psDevNode Pointer to device node struct
-@Output ppsPhysHeap PhysHeap if found.
-@Return PVRSRV_ERROR PVRSRV_OK or error code
-*/ /**************************************************************************/
-PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag,
- PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP **ppsPhysHeap);
-
-/*************************************************************************/ /*!
-@Function PhysHeapAcquireByDevPhysHeap
+@Function PhysHeapAcquireByID
@Description Acquire PhysHeap by DevPhysHeap.
@Input eDevPhysHeap Device Phys Heap.
@Input psDevNode Pointer to device node struct
@Output ppsPhysHeap PhysHeap if found.
@Return PVRSRV_ERROR PVRSRV_OK or error code
*/ /**************************************************************************/
-PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap,
- PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP **ppsPhysHeap);
+PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap,
+ PPVRSRV_DEVICE_NODE psDevNode,
+ PHYS_HEAP **ppsPhysHeap);
void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+/*************************************************************************/ /*!
+@Function PhysHeapGetPolicy
+@Description Get phys heap allocation policy flags.
+@Input psPhysHeap Pointer to physical heap.
+@Return PHYS_HEAP_POLICY Phys heap policy flags.
+*/ /**************************************************************************/
+PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap);
+
/*************************************************************************/ /*!
@Function PhysHeapGetFlags
@Description Get phys heap usage flags.
PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
IMG_UINT64 *puiSize);
-/*************************************************************************/ /*!
-@Function PVRSRVGetDevicePhysHeapCount
-@Description Get the physical heap count supported by the device.
-@Input psDevNode Device node, the heap count is requested for.
-@Output pui32PhysHeapCount Buffer that holds the heap count
-@Return None
-*/ /**************************************************************************/
-void PVRSRVGetDevicePhysHeapCount(PPVRSRV_DEVICE_NODE psDevNode,
- IMG_UINT32 *pui32PhysHeapCount);
-
/*************************************************************************/ /*!
@Function PhysHeapGetMemInfo
@Description Get phys heap memory statistics for a given physical heap ID.
*/ /**************************************************************************/
PVRSRV_ERROR
PhysHeapGetMemInfo(PPVRSRV_DEVICE_NODE psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats);
-
-/*************************************************************************/ /*!
-@Function PhysHeapGetMemInfoPkd
-@Description Get phys heap memory statistics for a given physical heap ID.
-@Input psDevNode Pointer to device node struct
-@Input ui32PhysHeapCount Physical heap count
-@Input paePhysHeapID Physical heap ID
-@Output paPhysHeapMemStats Buffer that holds the memory statistics
-@Return PVRSRV_ERROR PVRSRV_OK or error code
-*/ /**************************************************************************/
-PVRSRV_ERROR
-PhysHeapGetMemInfoPkd(PPVRSRV_DEVICE_NODE psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats);
-
-/*************************************************************************/ /*!
-@Function PhysheapGetPhysMemUsage
-@Description Get memory statistics for a given physical heap.
-@Input psPhysHeap Physical heap
-@Output pui64TotalSize Buffer that holds the total memory size of the
- given physical heap.
-@Output pui64FreeSize Buffer that holds the free memory available in
- a given physical heap.
-@Return none
-*/ /**************************************************************************/
-void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap,
- IMG_UINT64 *pui64TotalSize,
- IMG_UINT64 *pui64FreeSize);
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP *paePhysHeapID,
+ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats);
PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap,
IMG_DEV_PHYADDR *psDevPAddr);
PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap,
struct _CONNECTION_DATA_ *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PMR **ppsPMRPtr,
IMG_UINT32 ui32PDumpFlags);
-PVRSRV_ERROR PhysHeapInit(void);
-void PhysHeapDeinit(void);
-
/*************************************************************************/ /*!
@Function PhysHeapDeviceNode
@Description Get pointer to the device node this heap belongs to.
PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap);
/*************************************************************************/ /*!
-@Function PhysHeapPVRLayerAcquire
-@Description Is phys heap to be acquired in PVR layer?
+@Function PhysHeapInitByPVRLayer
+@Description Is phys heap to be initialised in PVR layer?
@Input ePhysHeap phys heap
@Return IMG_BOOL return IMG_TRUE if yes
*/ /**************************************************************************/
-IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap);
+IMG_BOOL PhysHeapInitByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap);
/*************************************************************************/ /*!
@Function PhysHeapUserModeAlloc
*/ /**************************************************************************/
IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap);
-/*************************************************************************/ /*!
-@Function PhysHeapMMUPxSetup
-@Description Setup MMU Px allocation function pointers.
-@Input psDeviceNode Pointer to device node struct
-@Return PVRSRV_ERROR PVRSRV_OK on success.
-*/ /**************************************************************************/
-PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode);
-
-/*************************************************************************/ /*!
-@Function PhysHeapMMUPxDeInit
-@Description Deinit after PhysHeapMMUPxSetup.
-@Input psDeviceNode Pointer to device node struct
-*/ /**************************************************************************/
-void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode);
-
#if defined(SUPPORT_GPUVIRT_VALIDATION)
PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap,
size_t uiSize,
*/ /**************************************************************************/
IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap);
+/*************************************************************************/ /*!
+@Function PhysHeapFreeMemCheck
+@Description Check a physheap has the required amount of free memory.
+
+@Input psPhysHeap Pointer to physical heap.
+@Input ui64MinRequiredMem The minimum free memory for success (bytes).
+@Output pui64FreeMem The free memory in the physical heap (bytes).
+
+@Return PVRSRV_ERROR If successful PVRSRV_OK else a PVRSRV_ERROR code.
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap,
+ IMG_UINT64 ui64MinRequiredMem,
+ IMG_UINT64 *pui64FreeMem);
+
#endif /* PHYSHEAP_H */
typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS;
-#define PHYS_HEAP_USAGE_GPU_LOCAL (1<<PVRSRV_PHYS_HEAP_GPU_LOCAL)
-#define PHYS_HEAP_USAGE_CPU_LOCAL (1<<PVRSRV_PHYS_HEAP_CPU_LOCAL)
-#define PHYS_HEAP_USAGE_FW_MAIN (1<<PVRSRV_PHYS_HEAP_FW_MAIN)
-#define PHYS_HEAP_USAGE_FW_CONFIG (1<<PVRSRV_PHYS_HEAP_FW_CONFIG)
-#define PHYS_HEAP_USAGE_EXTERNAL (1<<PVRSRV_PHYS_HEAP_EXTERNAL)
-#define PHYS_HEAP_USAGE_GPU_PRIVATE (1<<PVRSRV_PHYS_HEAP_GPU_PRIVATE)
-#define PHYS_HEAP_USAGE_GPU_COHERENT (1<<PVRSRV_PHYS_HEAP_GPU_COHERENT)
-#define PHYS_HEAP_USAGE_GPU_SECURE (1<<PVRSRV_PHYS_HEAP_GPU_SECURE)
-#define PHYS_HEAP_USAGE_FW_CODE (1<<PVRSRV_PHYS_HEAP_FW_CODE)
-#define PHYS_HEAP_USAGE_FW_PRIV_DATA (1<<PVRSRV_PHYS_HEAP_FW_PRIV_DATA)
-#define PHYS_HEAP_USAGE_WRAP (1<<30)
-#define PHYS_HEAP_USAGE_DISPLAY (1<<31)
+/**
+ * ! IMPORTANT !
+ * If you update the PHYS_HEAP_USAGE_FLAGS definitions, you must update the
+ * g_asPhysHeapUsageFlagStrings structure within physheap.c and the
+ * PHYS_HEAP_USAGE_MASK.
+ */
+#define PHYS_HEAP_USAGE_GPU_LOCAL (1U <<PVRSRV_PHYS_HEAP_GPU_LOCAL)
+#define PHYS_HEAP_USAGE_CPU_LOCAL (1U <<PVRSRV_PHYS_HEAP_CPU_LOCAL)
+#define PHYS_HEAP_USAGE_GPU_PRIVATE (1U <<PVRSRV_PHYS_HEAP_GPU_PRIVATE)
+#define PHYS_HEAP_USAGE_EXTERNAL (1U <<PVRSRV_PHYS_HEAP_EXTERNAL)
+#define PHYS_HEAP_USAGE_GPU_COHERENT (1U <<PVRSRV_PHYS_HEAP_GPU_COHERENT)
+#define PHYS_HEAP_USAGE_GPU_SECURE (1U <<PVRSRV_PHYS_HEAP_GPU_SECURE)
+#define PHYS_HEAP_USAGE_FW_CODE (1U <<PVRSRV_PHYS_HEAP_FW_CODE)
+#define PHYS_HEAP_USAGE_FW_PRIV_DATA (1U <<PVRSRV_PHYS_HEAP_FW_PRIV_DATA)
+#define PHYS_HEAP_USAGE_FW_PREMAP_PT (1U <<PVRSRV_PHYS_HEAP_FW_PREMAP_PT)
+#define PHYS_HEAP_USAGE_FW_PREMAP (1U <<PVRSRV_PHYS_HEAP_FW_PREMAP0)
+#define PHYS_HEAP_USAGE_WRAP (1U <<PVRSRV_PHYS_HEAP_WRAP)
+#define PHYS_HEAP_USAGE_DISPLAY (1U <<PVRSRV_PHYS_HEAP_DISPLAY)
+#define PHYS_HEAP_USAGE_FW_SHARED (1U <<30)
+#define PHYS_HEAP_USAGE_FW_PRIVATE (1U <<31)
+
+#define PHYS_HEAP_USAGE_MASK (PHYS_HEAP_USAGE_GPU_LOCAL | \
+ PHYS_HEAP_USAGE_CPU_LOCAL | \
+ PHYS_HEAP_USAGE_GPU_PRIVATE | \
+ PHYS_HEAP_USAGE_EXTERNAL | \
+ PHYS_HEAP_USAGE_GPU_COHERENT | \
+ PHYS_HEAP_USAGE_GPU_SECURE | \
+ PHYS_HEAP_USAGE_FW_CODE | \
+ PHYS_HEAP_USAGE_FW_PRIV_DATA | \
+ PHYS_HEAP_USAGE_FW_SHARED | \
+ PHYS_HEAP_USAGE_FW_PRIVATE | \
+ PHYS_HEAP_USAGE_FW_PREMAP | \
+ PHYS_HEAP_USAGE_FW_PREMAP_PT | \
+ PHYS_HEAP_USAGE_WRAP | \
+ PHYS_HEAP_USAGE_DISPLAY)
+
+#define FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM PVRSRV_PHYS_HEAP_FW_CODE
typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
IMG_UINT32 ui32NumOfAddr,
#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
#include "common_rgxregconfig_bridge.h"
#endif
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#include "common_rgxkicksync_bridge.h"
+#endif
#include "common_rgxtimerquery_bridge.h"
#if defined(SUPPORT_RGXRAY_BRIDGE)
#include "common_rgxray_bridge.h"
/* 136: RGX kicksync interface */
#define PVRSRV_BRIDGE_RGXKICKSYNC 136UL
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1)
#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
-
+#else
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST)
+#endif
/* 137: RGX TQ2 interface */
#define PVRSRV_BRIDGE_RGXTQ2 137UL
#if defined(SUPPORT_FASTRENDER_DM)
| (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST))
#if defined(PDUMP)
| (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
#endif
| (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
#if defined(SUPPORT_REGCONFIG)
* FILE_SIZE
*/
-#define FW_INFO_VERSION (1)
+#define FW_INFO_VERSION (2)
typedef struct
{
+ /* FW_INFO_VERSION 1 */
IMG_UINT32 ui32InfoVersion; /* FW info version */
IMG_UINT32 ui32HeaderLen; /* Header length */
IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */
IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */
IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */
IMG_UINT32 ui32Flags; /* Compatibility flags */
+
+ /* FW_INFO_VERSION 2 */
+ IMG_UINT16 ui16PVRVersionMajor; /* DDK major version number */
+ IMG_UINT16 ui16PVRVersionMinor; /* DDK minor version number */
+ IMG_UINT32 ui32PVRVersionBuild; /* DDK build number */
} RGX_FW_INFO_HEADER;
typedef struct
#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU)
#define TQ_PREP_FLAGS_COMMAND_SHIFT 0
#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4)
-#define TQ_PREP_FLAGS_START (1U << 5)
-#define TQ_PREP_FLAGS_END (1U << 6)
#define TQ_PREP_FLAGS_COMMAND_SET(m) \
((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
#if !defined(RGXSHADERHEADER_H)
#define RGXSHADERHEADER_H
+#include "pvrversion.h"
+
typedef struct _RGX_SHADER_HEADER_
{
IMG_UINT32 ui32Version;
IMG_UINT32 ui32SizeClientMem;
} RGX_SHADER_HEADER;
+/* TQ shaders version is used to check compatibility between the
+ binary TQ shaders file and the DDK. This number should be incremented
+ if a change to the TQ shader factory breaks compatibility. */
+#define RGX_TQ_SHADERS_VERSION 1U
+
+#define RGX_TQ_SHADERS_VERSION_PACK \
+ (((RGX_TQ_SHADERS_VERSION & 0xFFU) << 16) | ((PVRVERSION_MAJ & 0xFFU) << 8) | ((PVRVERSION_MIN & 0xFFU) << 0))
+
#endif /* RGXSHADERHEADER_H */
Module parameters (rogue-specific)
******************************************************************************/
#define APPHINT_LIST_MODPARAM \
-/* name, type, class, default, helper, */ \
-X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE ) \
+/* name, type, class, default, helper, guest, */ \
+X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE, ALWAYS ) \
\
-X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE ) \
-X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE ) \
-X(ECCRAMErrInj, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \
+X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE, ALWAYS ) \
+X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE, ALWAYS ) \
+X(ECCRAMErrInj, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
\
-X(TFBCCompressionControlGroup, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP, NO_PARAM_TABLE ) \
-X(TFBCCompressionControlScheme, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME, NO_PARAM_TABLE ) \
-X(TFBCCompressionControlYUVFormat, BOOL, VALIDATION, 0, NO_PARAM_TABLE ) \
+X(TFBCCompressionControlGroup, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP, NO_PARAM_TABLE, ALWAYS ) \
+X(TFBCCompressionControlScheme, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME, NO_PARAM_TABLE, ALWAYS ) \
+X(TFBCCompressionControlYUVFormat, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
/*
*******************************************************************************
Debugfs parameters (rogue-specific) - driver configuration
******************************************************************************/
#define APPHINT_LIST_DEBUGINFO \
-/* name, type, class, default, helper, */ \
+/* name, type, class, default, helper, guest, */ \
/*
*******************************************************************************
Debugfs parameters (rogue-specific) - device configuration
******************************************************************************/
#define APPHINT_LIST_DEBUGINFO_DEVICE \
-/* name, type, class, default, helper, */ \
+/* name, type, class, default, helper, guest, */ \
/*
*******************************************************************************
******************************************************************************/
/* Unique ID for all AppHints */
typedef enum {
-#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+#define X(a, b, c, d, e, f) APPHINT_ID_ ## a,
APPHINT_LIST_ALL
#undef X
APPHINT_ID_MAX
/* ID for build variable Apphints - used for build variable only structures */
typedef enum {
-#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+#define X(a, b, c, d, e, f) APPHINT_BUILDVAR_ID_ ## a,
APPHINT_LIST_BUILDVAR_COMMON
APPHINT_LIST_BUILDVAR
#undef X
/* ID for Modparam Apphints - used for modparam only structures */
typedef enum {
-#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+#define X(a, b, c, d, e, f) APPHINT_MODPARAM_ID_ ## a,
APPHINT_LIST_MODPARAM_COMMON
APPHINT_LIST_MODPARAM
#undef X
/* ID for Debugfs Apphints - used for debugfs only structures */
typedef enum {
-#define X(a, b, c, d, e) APPHINT_DEBUGINFO_ID_ ## a,
+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_ID_ ## a,
APPHINT_LIST_DEBUGINFO_COMMON
APPHINT_LIST_DEBUGINFO
#undef X
/* ID for Debugfs Device Apphints - used for debugfs device only structures */
typedef enum {
-#define X(a, b, c, d, e) APPHINT_DEBUGINFO_DEVICE_ID_ ## a,
+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_DEVICE_ID_ ## a,
APPHINT_LIST_DEBUGINFO_DEVICE_COMMON
APPHINT_LIST_DEBUGINFO_DEVICE
#undef X
typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL;
-typedef struct SYNC_CHECKPOINT_CONTEXT_TAG
-{
- PPVRSRV_DEVICE_NODE psDevNode;
- IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */
- RA_ARENA *psSubAllocRA; /*!< RA context */
- IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */
- RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
- ATOMIC_T hRefCount; /*!< Ref count for this context */
- ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */
- POS_LOCK hLock;
- _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl;
-#if defined(PDUMP)
- DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/
- POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/
- DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/
-#endif
-} _SYNC_CHECKPOINT_CONTEXT;
+typedef struct SYNC_CHECKPOINT_CONTEXT_TAG _SYNC_CHECKPOINT_CONTEXT;
typedef struct _SYNC_CHECKPOINT_BLOCK_
{
ATOMIC_T hRefCount; /*!< Ref count for this sync block */
POS_LOCK hLock;
_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */
- PPVRSRV_DEVICE_NODE psDevNode;
IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */
IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */
volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */
- IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */
+ RA_BASE_T uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */
#if defined(PDUMP)
DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */
#endif
typedef struct SYNC_CHECKPOINT_TAG
{
- //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */
/* A sync checkpoint is assigned a unique ID, to avoid any confusion should
* the same memory be re-used later for a different checkpoint
*/
ATOMIC_T hRefCount; /*!< Ref count for this sync */
ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */
SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */
- IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
+ RA_BASE_T uiAllocatedAddr; /*!< Allocated address of the sync */
volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */
IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */
PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */
- IMG_UINT32 ui32ValidationCheck;
IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */
PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */
DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */
DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */
IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */
+#if defined(PDUMP)
PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/
+#endif
+#if defined(DEBUG)
+ IMG_UINT32 ui32ValidationCheck; /*!< Structure validity pattern */
+#endif
} SYNC_CHECKPOINT;
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Services AppHint definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Volcanic-specific kernel-mode AppHint definitions (X-macro lists)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "km_apphint_defs_common.h"
+
+#ifndef KM_APPHINT_DEFS_H
+#define KM_APPHINT_DEFS_H
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices
+ */
+#define APPHINT_LIST_ALL \
+ APPHINT_LIST_BUILDVAR_COMMON \
+ APPHINT_LIST_BUILDVAR \
+ APPHINT_LIST_MODPARAM_COMMON \
+ APPHINT_LIST_MODPARAM \
+ APPHINT_LIST_DEBUGINFO_COMMON \
+ APPHINT_LIST_DEBUGINFO \
+ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \
+ APPHINT_LIST_DEBUGINFO_DEVICE
+
+
+/*
+*******************************************************************************
+ Build variables (volcanic-specific)
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR
+
+/*
+*******************************************************************************
+ Module parameters (volcanic-specific)
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name, type, class, default, helper, guest, */ \
+X(FabricCoherencyOverride, UINT32, ALWAYS, PVRSRV_APPHINT_FABRICCOHERENCYOVERRIDE, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(EnableGenericDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLEDMKILLINGRANDMODE, NO_PARAM_TABLE, ALWAYS ) \
+X(KillingCtl, UINT32, VALIDATION, PVRSRV_APPHINT_KILLINGCTL, NO_PARAM_TABLE, ALWAYS ) \
+X(CDMTDMKillingCtl, UINT32, VALIDATION, PVRSRV_APPHINT_CDMTDM_KILLINGCTL, NO_PARAM_TABLE, ALWAYS ) \
+X(HWValEnableSPUPowerMaskChange, BOOL, VALIDATION, PVRSRV_APPHINT_HWVALENABLESPUPOWERMASKCHANGE, NO_PARAM_TABLE, ALWAYS ) \
+X(HWValAvailableSPUMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLESPUMASK, NO_PARAM_TABLE, ALWAYS ) \
+X(HWValAvailableRACMask, UINT32, VALIDATION, PVRSRV_APPHINT_HWVALAVAILABLERACMASK, NO_PARAM_TABLE, ALWAYS ) \
+X(EnableSPUClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESPUCLOCKGATING, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(HWPerfDisableCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECOUNTERFILTER, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(ISPSchedulingLatencyMode, UINT32, ALWAYS, PVRSRV_APPHINT_ISPSCHEDULINGLATENCYMODE, NO_PARAM_TABLE, ALWAYS ) \
+X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(USRMNumRegionsVDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(USRMNumRegionsCDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(USRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(USRMNumRegionsPDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(USRMNumRegionsTDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(UVBRMNumRegionsVDM, UINT64, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(UVBRMNumRegionsDDM, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(CDMArbitrationOverride, UINT32, ALWAYS, PVRSRV_APPHINT_CDMARBITRATIONOVERRIDE, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(DualLockstepFWProcessor, BOOL, VALIDATION, 1, NO_PARAM_TABLE, ALWAYS ) \
+X(GPUStatePin, BOOL, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+X(PowerDomainKickInterval, UINT32, VALIDATION, 0, NO_PARAM_TABLE, ALWAYS ) \
+\
+X(RCEDisableMask, UINT64, VALIDATION, PVRSRV_APPHINT_RCEDISABLEMASK, NO_PARAM_TABLE, ALWAYS ) \
+X(PCGPktDropThresh, UINT32, VALIDATION, PVRSRV_APPHINT_PCGPKTDROPTHRESH, NO_PARAM_TABLE, ALWAYS ) \
+X(RaySLCMMUAutoCacheOps, UINT32, VALIDATION, PVRSRV_APPHINT_RAYSLCMMUAUTOCACHEOPS, NO_PARAM_TABLE, ALWAYS ) \
+/*
+*******************************************************************************
+ Debugfs parameters (volcanic-specific) - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO \
+/* name, type, class, default, helper, guest, */ \
+
+/*
+*******************************************************************************
+ Debugfs parameters (volcanic-specific) - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGINFO_DEVICE \
+/* name, type, class, default, helper, guest, */ \
+
+/*
+*******************************************************************************
+ Mapping between debugfs parameters and module parameters.
+ This mapping is used to initialise device specific apphints from module
+ parameters.
+******************************************************************************/
+/* NOTE(review): "DEBUIGINFO" is a typo for "DEBUGINFO"; retained because the
+ * macro is expanded by name in consumers outside this file — rename tree-wide. */
+#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT \
+/* debuginfo device apphint name modparam name */
+
+/*
+*******************************************************************************
+
+ Table generated enums
+
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e, f) APPHINT_ID_ ## a,
+ APPHINT_LIST_ALL
+#undef X
+ APPHINT_ID_MAX
+} APPHINT_ID;
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e, f) APPHINT_BUILDVAR_ID_ ## a,
+ APPHINT_LIST_BUILDVAR_COMMON
+ APPHINT_LIST_BUILDVAR
+#undef X
+ APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e, f) APPHINT_MODPARAM_ID_ ## a,
+ APPHINT_LIST_MODPARAM_COMMON
+ APPHINT_LIST_MODPARAM
+#undef X
+ APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_ID_ ## a,
+ APPHINT_LIST_DEBUGINFO_COMMON
+ APPHINT_LIST_DEBUGINFO
+#undef X
+ APPHINT_DEBUGINFO_ID_MAX
+} APPHINT_DEBUGINFO_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e, f) APPHINT_DEBUGINFO_DEVICE_ID_ ## a,
+ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON
+ APPHINT_LIST_DEBUGINFO_DEVICE
+#undef X
+ APPHINT_DEBUGINFO_DEVICE_ID_MAX
+} APPHINT_DEBUGINFO_DEVICE_ID;
+
+#endif /* KM_APPHINT_DEFS_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX API Header kernel mode
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exported RGX API details
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXAPI_KM_H
+#define RGXAPI_KM_H
+
+#include "rgx_hwperf.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+/*! HWPerf device identification structure */
+typedef struct _RGX_HWPERF_DEVICE_
+{
+ IMG_CHAR pszName[20]; /*!< Helps identify this device uniquely */
+ IMG_HANDLE hDevData; /*!< Handle for the server */
+
+ struct _RGX_HWPERF_DEVICE_ *psNext; /*!< Next device if any */
+} RGX_HWPERF_DEVICE;
+
+/*! HWPerf connection structure */
+typedef struct
+{
+ RGX_HWPERF_DEVICE *psHWPerfDevList; /*!< pointer to list of devices */
+} RGX_HWPERF_CONNECTION;
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfLazyConnect
+@Description Obtain a HWPerf connection object to the RGX device(s). The
+ connections to devices are not actually opened until
+ HWPerfOpen() is called.
+
+@Output ppsHWPerfConnection Address of a HWPerf connection object
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfOpen
+@Description Opens connection(s) to the RGX device(s). Valid handle to the
+ connection object has to be provided, which means this
+ function needs to be preceded by the call to
+ RGXHWPerfLazyConnect() function.
+
+@Input psHWPerfConnection HWPerf connection object
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfConnect
+@Description Obtain a connection object to the RGX HWPerf module. Allocated
+ connection object(s) reference opened connection(s). Calling
+ this function is an equivalent of calling RGXHWPerfLazyConnect
+ and RGXHWPerfOpen. This connect should be used when the caller
+ will be retrieving event data.
+
+@Output ppsHWPerfConnection Address of HWPerf connection object
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfFreeConnection
+@Description Frees the HWPerf connection object
+
+@Input psHWPerfConnection Pointer to connection object as returned
+ from RGXHWPerfLazyConnect()
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfClose
+@Description Closes all the opened connection(s) to RGX device(s)
+
+@Input psHWPerfConnection Pointer to HWPerf connection object as
+ returned from RGXHWPerfConnect() or
+ RGXHWPerfOpen()
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfDisconnect
+@Description Disconnect from the RGX device
+
+@Input ppsHWPerfConnection Pointer to HWPerf connection object as
+ returned from RGXHWPerfConnect() or
+ RGXHWPerfOpen(). Calling this function is
+ an equivalent of calling RGXHWPerfClose()
+ and RGXHWPerfFreeConnection().
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfControl
+@Description Enable or disable the generation of RGX HWPerf event packets.
+ See RGXCtrlHWPerf().
+
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input eStreamId ID of the HWPerf stream
+@Input bToggle Switch to toggle or apply mask.
+@Input ui64Mask Mask of events to control.
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfControl(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfGetFilter
+@Description Reads HWPerf stream filter where stream is identified by the
+ given stream ID.
+
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output ui64Filter HWPerf filter value
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT64 *ui64Filter
+);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfConfigureCounters
+@Description Enable and configure the performance counter block for one or
+ more device layout modules.
+ See RGXConfigHWPerfCounters().
+
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input ui32CtrlWord One of <tt>RGX_HWPERF_CTRL_NOP</tt>,
+ <tt>RGX_HWPERF_CTRL_GEOM_FULLRANGE</tt>,
+ <tt>RGX_HWPERF_CTRL_COMP_FULLRANGE</tt>,
+ <tt>RGX_HWPERF_CTRL_TDM_FULLRANGE</tt>
+@Input ui32NumBlocks Number of elements in the array
+@Input asBlockConfigs Address of the array of configuration blocks
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfConfigureCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32CtrlWord,
+ IMG_UINT32 ui32NumBlocks,
+ RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfDisableCounters
+@Description Disable the performance counter block for one or more device
+ layout modules.
+
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of words with values taken from
+ the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfEnableCounters
+@Description Enable the performance counter block for one or more device
+ layout modules.
+
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of words with values taken from the
+ <tt>RGX_HWPERF_CNTBLK_ID</tt> enumeration.
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfEnableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a
+ * single connection/stream is not shared between multiple execution contexts,
+ * e.g. between a kernel thread and an ISR handler. It is the client's
+ * responsibility to ensure this API is not interrupted by a high priority
+ * thread/ISR.
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfAcquireEvents
+@Description When there is data available to read this call returns with OK
+ and the address and length of the data buffer the client can
+ safely read. This buffer may contain one or more event packets.
+ When there is no data to read, this call returns with OK and
+ sets *pui32BufLen to 0 on exit.
+ Clients must pair this call with a RGXHWPerfReleaseEvents()
+ call.
+ Data returned in ppBuf will be in the form of a sequence of
+ HWPerf packets which should be traversed using the pointers,
+ structures and macros provided in rgx_hwperf.h
+
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output ppBuf Address of a pointer to a byte buffer. On exit it
+ contains the address of buffer to read from
+@Output pui32BufLen Pointer to an integer. On exit it is the size of
+ the data to read from the buffer
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_PBYTE* ppBuf,
+ IMG_UINT32* pui32BufLen);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfReleaseEvents
+@Description Called after client has read the event data out of the buffer
+ retrieved from the Acquire Events call to release resources.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Return PVRSRV_ERROR System error code
+*/ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId);
+
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfConvertCRTimeStamp
+@Description Converts the timestamp given by FW events to the common OS
+ timestamp. The first three inputs are obtained via a CLK_SYNC
+ event, ui64CRTimeStamp is the CR timestamp from the FW event
+ to be converted.
+@Input ui32ClkSpeed Clock speed given by sync event
+@Input ui64CorrCRTimeStamp CR Timestamp given by sync event
+@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync
+ event
+@Input ui64CRTimeStamp CR Timestamp to convert
+@Return IMG_UINT64 Calculated OS Timestamp
+*/ /**************************************************************************/
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+ IMG_UINT32 ui32ClkSpeed,
+ IMG_UINT64 ui64CorrCRTimeStamp,
+ IMG_UINT64 ui64CorrOSTimeStamp,
+ IMG_UINT64 ui64CRTimeStamp);
+
+#endif /* RGXAPI_KM_H */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
static CACHEOP_WORK_QUEUE gsCwq;
#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? IMG_TRUE : IMG_FALSE)
-extern void do_invalid_range(unsigned long start, unsigned long len);
#if defined(CACHEOP_DEBUG)
static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
}
}
- if(uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE && uiSize >= 4096)
- {
- do_invalid_range(0x00000000, 0x200000);
- }
-
e0:
if (psCpuPhyAddr != asCpuPhyAddr)
{
if (psProcessHandleBase != NULL)
{
+ /* PVRSRVReleaseProcessHandleBase() calls PVRSRVFreeKernelHandles()
+ * and PVRSRVFreeHandleBase() for the process handle base.
+ * Releasing kernel handles can never return RETRY error because
+ * release functions for those handles are NOPs and PVRSRVFreeKernelHandles()
+ * doesn't even call pfnReleaseData() callback.
+ * Process handles can potentially return RETRY hence additional check
+ * below. */
eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid,
ui64MaxBridgeTime);
- PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVReleaseProcessHandleBase");
+ if (PVRSRVIsRetryError(eError))
+ {
+ return eError;
+ }
+ else
+ {
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVReleaseProcessHandleBase");
+ }
psConnection->psProcessHandleBase = NULL;
}
* Retrying will allow the in-flight work to be completed and the
* tear-down request can be completed when the FW is no longer busy.
*/
- if (PVRSRV_ERROR_RETRY == eError)
+ if (PVRSRVIsRetryError(eError))
{
return eError;
}
else
{
- PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:2");
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase");
}
psConnection->psHandleBase = NULL;
if (psConnection->psPDumpConnectionData != NULL)
{
- PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+ PDumpUnregisterConnection(OSGetDevNode(psConnection),
+ psConnection->psPDumpConnectionData);
psConnection->psPDumpConnectionData = NULL;
}
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsDeviceDisconnect(OSGetDevNode(psConnection));
+#endif
+
/* Call environment specific connection data deinit function */
if (psConnection->hOsPrivateData != NULL)
{
* Register this connection and Sync PDump callback with
* the pdump core. Pass in the Sync connection data.
*/
- eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
- SyncConnectionPDumpSyncBlocks,
- &psConnection->psPDumpConnectionData);
+ eError = PDumpRegisterConnection(OSGetDevNode(psConnection),
+ psConnection->psSyncConnectionData,
+ SyncConnectionPDumpSyncBlocks,
+ &psConnection->psPDumpConnectionData);
PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure);
/* Allocate handle base for this connection */
PVRSRV_RGXDEV_INFO *psRgxDevInfo;
PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection);
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ eError = PVRSRVStatsDeviceConnect(psDevNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsDeviceConnect", failure);
+#endif
+
OSLockAcquire(psDevNode->hConnectionsLock);
dllist_add_to_tail(&psDevNode->sConnections, &psConnection->sConnectionListNode);
#if defined(DEBUG) || defined(PDUMP)
eErrorConnection = ConnectionDataDestroy(psConnectionData);
if (eErrorConnection != PVRSRV_OK)
{
- if (eErrorConnection == PVRSRV_ERROR_RETRY)
+ if (PVRSRVIsRetryError(eErrorConnection))
{
- PVR_DPF((PVR_DBG_MESSAGE,
- "%s: Failed to purge connection data %p "
- "(deferring destruction)",
- __func__,
- psConnectionData));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Failed to purge connection data %p "
+ "(deferring destruction)", __func__, psConnectionData));
}
}
else
{
- PVR_DPF((PVR_DBG_MESSAGE,
- "%s: Connection data %p deferred destruction finished",
- __func__,
- psConnectionData));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Connection data %p deferred destruction "
+ "finished", __func__, psConnectionData));
}
/* Check if possible resize the global handle base */
/* Defer the release of the connection data */
psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
- psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+ /* Some resources in HANDLE_BASE may need FW idle confirmation
+ * hence setting to TRUE to use the global EO for retries which is
+ * signalled by the device MISR */
+ psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn,
CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
{
PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections",
(unsigned char)psDevNode->sDevId.ui32InternalID,
- (unsigned char)psDevNode->sDevId.i32OsDeviceID);
+ (unsigned char)psDevNode->sDevId.i32KernelDeviceID);
}
else
{
MAX_CONNECTIONS_PREFIX,
CONNECTIONS_PREFIX,
(unsigned char)psDevNode->sDevId.ui32InternalID,
- (unsigned char)psDevNode->sDevId.i32OsDeviceID);
+ (unsigned char)psDevNode->sDevId.i32KernelDeviceID);
OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize);
/* Move the write offset to the end of the current string */
#include "rgx_options.h"
#include "allocmem.h"
#include "rgxfwutils.h"
+#include "osfunc.h"
+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+#include "rgxfwdbg.h"
+#endif
#ifdef SUPPORT_RGX
#include "rgxdevice.h"
#include "rgxdebug.h"
#include "rgxinit.h"
#include "rgxmmudefs_km.h"
-static IMG_HANDLE ghGpuUtilUserDebugFS;
#endif
static DI_ENTRY *gpsVersionDIEntry;
static DI_ENTRY *gpsDebugLevelDIEntry;
#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+struct DI_VZ_DATA {
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_UINT32 ui32DriverID;
+};
+#endif
+
static void _DumpDebugDIPrintfWrapper(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...)
{
IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName);
DIPrintf(psEntry, "Device ID: %u:%d\n", psDevNode->sDevId.ui32InternalID,
- psDevNode->sDevId.i32OsDeviceID);
+ psDevNode->sDevId.i32KernelDeviceID);
if (psDevConfig->pszVersion)
{
PVRVERSION_UNPACK_MIN(ui32DDKVer),
psRGXCompChecks->ui32DDKBuild,
((psRGXCompChecks->ui32BuildOptions &
- OPTIONS_DEBUG_MASK) ? STR_DEBUG : STR_RELEASE),
+ OPTIONS_DEBUG_EN) ? STR_DEBUG : STR_RELEASE),
psRGXCompChecks->ui32BuildOptions,
PVR_BUILD_DIR);
bFwVersionInfoPrinted = IMG_TRUE;
eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
RGXFWIF_DM_GP,
&sCounterDumpCmd,
- 0,
PDUMP_FLAGS_CONTINUOUS,
pui32kCCBCommandSlot);
PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
IMG_UINT32 ui32kCCBCommandSlot;
- PVRSRV_ERROR eError = PVRSRV_OK;
+ int eError = 0;
PVR_UNREFERENCED_PARAMETER(pvData);
/* Read back the buffer */
{
IMG_UINT32* pui32PowerBuffer;
- IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod;
+ IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod, ui32NumOfCores;
IMG_UINT32 i, j;
+ if (!psDevInfo->psCounterBufferMemDesc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Counter buffer not allocated!"));
+ return -EINVAL;
+ }
+
eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc,
(void**)&pui32PowerBuffer);
if (eError != PVRSRV_OK)
ui32NumOfRegs = *pui32PowerBuffer++;
ui32SamplePeriod = *pui32PowerBuffer++;
+ ui32NumOfCores = *pui32PowerBuffer++;
+ PVR_DPF((PVR_DBG_MESSAGE, "Number of power counters: %u.", ui32NumOfRegs));
- if (ui32NumOfRegs)
+ if (ui32NumOfCores == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "No GPU cores enabled!"));
+ eError = -EINVAL;
+ }
+
+ if (ui32NumOfRegs && ui32NumOfCores)
{
DIPrintf(psEntry, "Power counter data for device\n");
DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod);
DIPrintf(psEntry, "0x%08x:", ui32RegOffset);
- for (j = 0; j < ui32NumOfInstances; j++)
+ for (j = 0; j < ui32NumOfInstances * ui32NumOfCores; j++)
{
ui32Low = *pui32PowerBuffer++;
- ui32High = *pui32PowerBuffer++;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CATURIX_XTP_TOP_INFRASTRUCTURE))
+ {
+ /* Power counters have 32-bit range */
+ DIPrintf(psEntry, " 0x%08x", ui32Low);
+ }
+ else
+ {
+ /* Power counters have 64-bit range */
+ ui32High = *pui32PowerBuffer++;
- DIPrintf(psEntry, " 0x%016llx",
- (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32);
+ DIPrintf(psEntry, " 0x%016" IMG_UINT64_FMTSPECx,
+ (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32);
+ }
}
DIPrintf(psEntry, "\n");
PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
DIPrintf(psEntry, "\nDevice ID: %u:%d\n", psDeviceNode->sDevId.ui32InternalID,
- psDeviceNode->sDevId.i32OsDeviceID);
+ psDeviceNode->sDevId.i32KernelDeviceID);
/* Update the health status now if possible... */
if (psDeviceNode->pfnUpdateHealthStatus)
if (psDevInfo->pfnGetGpuUtilStats &&
eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
{
- RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+ RGXFWIF_GPU_UTIL_STATS *psGpuUtilStats = OSAllocMem(sizeof(*psGpuUtilStats));
PVRSRV_ERROR eError = PVRSRV_OK;
+ if (psGpuUtilStats == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate GPU stats memory", __func__));
+ goto return_;
+ }
+
eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
- ghGpuUtilUserDebugFS,
- &sGpuUtilStats);
+ psDebugInfo->hGpuUtilUserDebugFS,
+ psGpuUtilStats);
if ((eError == PVRSRV_OK) &&
- ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+ ((IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative))
{
+ const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "GEOM", "3D", "CDM", "RAY", "GEOM2", "GEOM3", "GEOM4"};
IMG_UINT64 util;
IMG_UINT32 rem;
+ IMG_UINT32 ui32DriverID;
+ RGXFWIF_DM eDM;
- util = 100 * sGpuUtilStats.ui64GpuStatActive;
- util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)))
+ {
+ apszDmNames[RGXFWIF_DM_TDM] = "2D";
+ }
+#endif
+
+ util = 100 * psGpuUtilStats->ui64GpuStatActive;
+ util = OSDivide64(util, (IMG_UINT32)psGpuUtilStats->ui64GpuStatCumulative, &rem);
DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+
+ DIPrintf(psEntry, " ");
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ DIPrintf(psEntry, " VM%u", ui32DriverID);
+ }
+
+ DIPrintf(psEntry, "\n");
+
+ for (eDM = RGXFWIF_DM_TDM; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++)
+ {
+ DIPrintf(psEntry, "%-5s Utilisation: ", apszDmNames[eDM]);
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ IMG_UINT32 uiDivisor = (IMG_UINT32)psGpuUtilStats->aaui64DMOSStatCumulative[eDM][ui32DriverID];
+
+ if (uiDivisor == 0U)
+ {
+ DIPrintf(psEntry, " - ");
+ continue;
+ }
+
+ util = 100 * psGpuUtilStats->aaui64DMOSStatActive[eDM][ui32DriverID];
+ util = OSDivide64(util, uiDivisor, &rem);
+
+ DIPrintf(psEntry, "%3u%% ", (IMG_UINT32)util);
+ }
+
+
+ DIPrintf(psEntry, "\n");
+ }
}
else
{
DIPrintf(psEntry, "GPU Utilisation: -\n");
}
+
+ OSFreeMem(psGpuUtilStats);
}
}
#endif /* SUPPORT_RGX */
}
}
+#ifdef SUPPORT_RGX
+return_:
+#endif
return 0;
}
#if defined(RGX_FEATURE_MIPS_BIT_MASK)
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
{
- DIPrintf(psEntry, "| 0x%8X | "
- "0x%16" IMG_UINT64_FMTSPECX " | "
- "0x%16" IMG_UINT64_FMTSPECX " | "
+ DIPrintf(psEntry, "| 0x%08X | "
+ "0x%016" IMG_UINT64_FMTSPECX " | "
+ "0x%016" IMG_UINT64_FMTSPECX " | "
"%s%s%s |\n",
ui32FwVA,
(IMG_UINT64) sCpuPA.uiAddr,
#endif
{
/* META and RISCV use a subset of the GPU's virtual address space */
- DIPrintf(psEntry, "| 0x%8X | "
- "0x%16" IMG_UINT64_FMTSPECX " | "
- "0x%16" IMG_UINT64_FMTSPECX " | "
+ DIPrintf(psEntry, "| 0x%08X | "
+ "0x%016" IMG_UINT64_FMTSPECX " | "
+ "0x%016" IMG_UINT64_FMTSPECX " | "
"%s%s%s%s%s%s |\n",
ui32FwVA,
(IMG_UINT64) sCpuPA.uiAddr,
PVRSRV_RGXDEV_INFO *psDevInfo;
IMG_UINT32 ui32FwVA;
IMG_UINT32 ui32FwPageSize;
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
psDeviceNode = DIGetPrivData(psEntry);
ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
}
- for (ui32OSID = 0; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
{
IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) ((RGX_FIRMWARE_RAW_HEAP_BASE +
- (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX);
+ (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX);
IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_SIZE & UINT_MAX);
DIPrintf(psEntry, "| OS ID %u |\n"
- "+-----------------+------------------------+------------------------+--------------+\n", ui32OSID);
+ "+-----------------+------------------------+------------------------+--------------+\n", ui32DriverID);
for (ui32FwVA = ui32FwHeapBase;
ui32FwVA < ui32FwHeapEnd;
DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n");
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
if (PVRSRV_VZ_MODE_IS(NATIVE))
{
break;
}
+#endif
}
return 0;
#endif /* SUPPORT_FIRMWARE_GCOV */
-#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
-
-/*************************************************************************/ /*!
- Power monitoring DebugFS entry
-*/ /**************************************************************************/
-
-static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
-{
- PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
-
- PVR_UNREFERENCED_PARAMETER(pvData);
-
- if (psDevInfo != NULL)
- {
- RGXDumpPowerMonitoring(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo);
- }
-
- return 0;
-}
-
-#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
-
#ifdef SUPPORT_VALIDATION
#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG
}
#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+#if defined(SUPPORT_RGX) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+static int VZPriorityDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry);
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg;
+ IMG_UINT32 ui32DriverID;
+
+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO);
+
+ psDevInfo = psVZDriverData->psDevNode->pvDevice;
+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO);
+
+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO);
+
+ ui32DriverID = psVZDriverData->ui32DriverID;
+ PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED),
+ -EINVAL);
+
+ DIPrintf(psEntry, "%u\n", psRuntimeCfg->aui32DriverPriority[ui32DriverID]);
+
+ return 0;
+}
+
+static IMG_INT64 VZPrioritySet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+ IMG_UINT64 *pui64Pos, void *pvData)
+{
+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData;
+ const IMG_UINT32 uiMaxBufferSize = 12;
+ IMG_UINT32 ui32Priority;
+ PVRSRV_ERROR eError;
+
+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL);
+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO);
+
+ if (OSStringToUINT32(pcBuffer, 10, &ui32Priority) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+
+ eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psVZDriverData->psDevNode,
+ psVZDriverData->ui32DriverID, ui32Priority);
+ if (eError != PVRSRV_OK)
+ {
+ return -EIO;
+ }
+
+ *pui64Pos += ui64Count;
+ return ui64Count;
+}
+
+static int VZIsolationGroupDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+ DI_VZ_DATA *psVZDriverData = DIGetPrivData(psEntry);
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg;
+ IMG_UINT32 ui32DriverID;
+
+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO);
+
+ psDevInfo = psVZDriverData->psDevNode->pvDevice;
+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, -EIO);
+
+ psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ PVR_RETURN_IF_FALSE(psRuntimeCfg != NULL, -EIO);
+
+ ui32DriverID = psVZDriverData->ui32DriverID;
+ PVR_RETURN_IF_FALSE(ui32DriverID < (RGXFW_HOST_DRIVER_ID + RGX_NUM_DRIVERS_SUPPORTED),
+ -EINVAL);
+
+ DIPrintf(psEntry, "%u\n", psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]);
+
+ return 0;
+}
+
+static IMG_INT64 VZIsolationGroupSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count,
+ IMG_UINT64 *pui64Pos, void *pvData)
+{
+ const DI_VZ_DATA *psVZDriverData = (const DI_VZ_DATA*)pvData;
+ const IMG_UINT32 uiMaxBufferSize = 12;
+ IMG_UINT32 ui32IsolationGroup;
+ PVRSRV_ERROR eError;
+
+ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
+ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL);
+ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData != NULL, -EINVAL);
+ PVR_RETURN_IF_FALSE(psVZDriverData->psDevNode != NULL, -ENXIO);
+
+ if (OSStringToUINT32(pcBuffer, 10, &ui32IsolationGroup) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+
+ eError = PVRSRVRGXFWDebugSetDriverIsolationGroupKM(NULL, psVZDriverData->psDevNode,
+ psVZDriverData->ui32DriverID, ui32IsolationGroup);
+ if (eError != PVRSRV_OK)
+ {
+ return -EIO;
+ }
+
+ *pui64Pos += ui64Count;
+ return ui64Count;
+}
+#endif
+
PVRSRV_ERROR DebugCommonInitDriver(void)
{
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
return -EEXIST;
}
-#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
- if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
- {
- return -ENOMEM;
- }
-#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */
-
{
DI_ITERATOR_CB sIterator = {
.pfnStart = _VersionDIStart,
}
#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
-#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
- if (ghGpuUtilUserDebugFS != NULL)
- {
- SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS);
- ghGpuUtilUserDebugFS = NULL;
- }
-#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */
-
#ifdef SUPPORT_VALIDATION
if (gpsTestMemLeakDIEntry != NULL)
{
{
PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
PVRSRV_ERROR eError;
+ IMG_CHAR pszDeviceId[sizeof("gpu4294967296")];
- {
- IMG_CHAR pszDeviceId[sizeof("gpu4294967296")];
+ OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02d",
+ psDeviceNode->sDevId.ui32InternalID);
+ eError = DICreateGroup(pszDeviceId, NULL, &psDebugInfo->psGroup);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
- OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02d",
- psDeviceNode->sDevId.ui32InternalID);
-
- eError = DICreateGroup(pszDeviceId, NULL, &psDebugInfo->psGroup);
- PVR_GOTO_IF_ERROR(eError, return_error_);
- }
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+ eError = SORgxGpuUtilStatsRegister(&psDebugInfo->hGpuUtilUserDebugFS);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+#endif
{
DI_ITERATOR_CB sIterator = {.pfnShow = _DebugDumpDebugDIShow};
psDebugInfo->ui64RiscvDmi = 0ULL;
}
#endif /* SUPPORT_VALIDATION || SUPPORT_RISCV_GDB */
+
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (PVRSRV_VZ_MODE_IS(HOST))
+ {
+ eError = DICreateGroup("vz", psDebugInfo->psGroup, &psDebugInfo->psVZGroup);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+
+ {
+ IMG_UINT32 ui32DriverID;
+
+ DI_ITERATOR_CB sPriorityIterator = {
+ .pfnShow = VZPriorityDIShow,
+ .pfnWrite = VZPrioritySet,
+ //Max value of UINT_MAX (10 chars) + Null terminator
+ .ui32WriteLenMax = sizeof("4294967295")
+ };
+
+ DI_ITERATOR_CB sIsolationGroupIterator = {
+ .pfnShow = VZIsolationGroupDIShow,
+ .pfnWrite = VZIsolationGroupSet,
+ //Max value of UINT_MAX (10 chars) + Null terminator
+ .ui32WriteLenMax = sizeof("4294967295")
+ };
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ IMG_CHAR szDriverID[2];
+ OSSNPrintf(szDriverID, 2, "%u", ui32DriverID);
+
+ eError = DICreateGroup(szDriverID, psDebugInfo->psVZGroup, &psDebugInfo->apsVZDriverGroups[ui32DriverID]);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+
+ psDebugInfo->apsVZDriverData[ui32DriverID] = OSAllocMem(sizeof(PVRSRV_DEVICE_DEBUG_INFO));
+ PVR_GOTO_IF_NOMEM(psDebugInfo->apsVZDriverData[ui32DriverID], eError, return_error_);
+
+ psDebugInfo->apsVZDriverData[ui32DriverID]->psDevNode = psDeviceNode;
+ psDebugInfo->apsVZDriverData[ui32DriverID]->ui32DriverID = ui32DriverID;
+
+ eError = DICreateEntry("priority", psDebugInfo->apsVZDriverGroups[ui32DriverID],
+ &sPriorityIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC,
+ &psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+
+ eError = DICreateEntry("isolation_group", psDebugInfo->apsVZDriverGroups[ui32DriverID],
+ &sIsolationGroupIterator, psDebugInfo->apsVZDriverData[ui32DriverID], DI_ENTRY_TYPE_GENERIC,
+ &psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+ }
+ }
+ }
+#endif
}
#ifdef SUPPORT_VALIDATION
{
}
#endif /* SUPPORT_VALIDATION */
-#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
- if (! PVRSRV_VZ_MODE_IS(GUEST))
- {
- DI_ITERATOR_CB sIterator = {
- .pfnShow = _PowMonTraceDIShow
- };
- eError = DICreateEntry("power_mon", psDebugInfo->psGroup, &sIterator, psDeviceNode,
- DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowMonEntry);
- PVR_GOTO_IF_ERROR(eError, return_error_);
- }
-#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
{
DI_ITERATOR_CB sIterator = {
PVR_GOTO_IF_ERROR(eError, return_error_);
}
#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ {
+ DI_ITERATOR_CB sIterator = {
+ .pfnShow = PVRSRVPowerStatsPrintElements,
+ };
+ eError = DICreateEntry("power_timing_stats", psDebugInfo->psGroup, &sIterator, psDeviceNode,
+ DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowerTimingStatsEntry);
+ PVR_GOTO_IF_ERROR(eError, return_error_);
+ }
+#endif
#endif /* SUPPORT_RGX */
return PVRSRV_OK;
{
PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ if (psDebugInfo->psPowerTimingStatsEntry != NULL)
+ {
+ DIDestroyEntry(psDebugInfo->psPowerTimingStatsEntry);
+ psDebugInfo->psPowerTimingStatsEntry = NULL;
+ }
+#endif
+
#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
if (psDebugInfo->psPowerDataEntry != NULL)
{
}
#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */
-#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
- if (psDebugInfo->psPowMonEntry != NULL)
- {
- DIDestroyEntry(psDebugInfo->psPowMonEntry);
- psDebugInfo->psPowMonEntry = NULL;
- }
-#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */
-
#ifdef SUPPORT_VALIDATION
if (psDebugInfo->psRGXRegsEntry != NULL)
{
#endif /* SUPPORT_VALIDATION */
#ifdef SUPPORT_RGX
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (PVRSRV_VZ_MODE_IS(HOST))
+ {
+ IMG_UINT32 ui32DriverID;
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ if (psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] != NULL)
+ {
+ DIDestroyEntry(psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID]);
+ psDebugInfo->apsVZDriverIsolationGroupDIEntries[ui32DriverID] = NULL;
+ }
+
+ if (psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] != NULL)
+ {
+ DIDestroyEntry(psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID]);
+ psDebugInfo->apsVZDriverPriorityDIEntries[ui32DriverID] = NULL;
+ }
+
+ if (psDebugInfo->apsVZDriverData[ui32DriverID] != NULL)
+ {
+ OSFreeMem(psDebugInfo->apsVZDriverData[ui32DriverID]);
+ psDebugInfo->apsVZDriverData[ui32DriverID] = NULL;
+ }
+
+ if (psDebugInfo->apsVZDriverGroups[ui32DriverID] != NULL)
+ {
+ DIDestroyGroup(psDebugInfo->apsVZDriverGroups[ui32DriverID]);
+ psDebugInfo->apsVZDriverGroups[ui32DriverID] = NULL;
+ }
+ }
+
+ if (psDebugInfo->psVZGroup != NULL)
+ {
+ DIDestroyGroup(psDebugInfo->psVZGroup);
+ psDebugInfo->psVZGroup = NULL;
+ }
+ }
+#endif
+
if (psDebugInfo->psFWTraceEntry != NULL)
{
DIDestroyEntry(psDebugInfo->psFWTraceEntry);
psDebugInfo->psDumpDebugEntry = NULL;
}
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+ if (psDebugInfo->hGpuUtilUserDebugFS != NULL)
+ {
+ SORgxGpuUtilStatsUnregister(psDebugInfo->hGpuUtilUserDebugFS);
+ psDebugInfo->hGpuUtilUserDebugFS = NULL;
+ }
+#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */
+
if (psDebugInfo->psGroup != NULL)
{
DIDestroyGroup(psDebugInfo->psGroup);
psDebugInfo->psGroup = NULL;
}
}
+
+/*
+ Appends flags strings to a null-terminated string buffer.
+
+ For each entry of psConvTable (ui32TableSize entries) whose uiFlag
+ matches ui32Flags (per BITMASK_HAS), the entry's pszLabel is
+ concatenated onto psDesc, bounded by ui32DescSize. psDesc must already
+ be NUL-terminated on entry; labels that do not fit are truncated by
+ OSStringLCat rather than overflowing the buffer.
+*/
+void DebugCommonFlagStrings(IMG_CHAR *psDesc,
+	IMG_UINT32 ui32DescSize,
+	const IMG_FLAGS2DESC *psConvTable,
+	IMG_UINT32 ui32TableSize,
+	IMG_UINT32 ui32Flags)
+{
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+	{
+		/* NOTE(review): assumed BITMASK_HAS() tests that the uiFlag bits
+		 * are set in ui32Flags - confirm against the macro definition */
+		if (BITMASK_HAS(ui32Flags, psConvTable[ui32Idx].uiFlag))
+		{
+			OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
+		}
+	}
+}
static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
{
- IMG_UINT32 ui32OSPageSize = OSGetPageShift();
+ IMG_UINT32 ui32OSPageSize = OSGetPageSize();
/* Any heap length should at least match OS page size at the minimum or
* a multiple of OS page size */
psHeapBlueprint->uiHeapLength,
psHeapBlueprint->uiHeapLength));
PVR_DPF((PVR_DBG_ERROR,
- "Heap Size should always be a non-zero value and a "
+ "Heap Size should always be at least the DevMem minimum size and a "
"multiple of OS Page Size:%u(0x%x)",
ui32OSPageSize, ui32OSPageSize));
PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize);
IMG_DEVMEM_SIZE_T uiReservedRegionLength,
IMG_UINT32 ui32Log2DataPageSize,
IMG_UINT32 uiLog2ImportAlignment,
+ PFN_HEAP_INIT pfnInit,
+ PFN_HEAP_DEINIT pfnDeInit,
DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
{
psHeapBlueprint->pszName = pszName;
psHeapBlueprint->uiReservedRegionLength = uiReservedRegionLength;
psHeapBlueprint->uiLog2DataPageSize = ui32Log2DataPageSize;
psHeapBlueprint->uiLog2ImportAlignment = uiLog2ImportAlignment;
+ psHeapBlueprint->pfnInit = pfnInit;
+ psHeapBlueprint->pfnDeInit = pfnDeInit;
_CheckBlueprintHeapAlignment(psHeapBlueprint);
}
return PVRSRV_OK;
}
+/* HeapCfgGetCallbacks
+ *
+ * Look up the heap blueprint at (uiHeapConfigIndex, uiHeapIndex) on the
+ * given device and return its init/deinit callbacks through the out
+ * parameters. Either callback may be NULL if the blueprint registered
+ * none (see HeapCfgBlueprintInit).
+ *
+ * Returns PVRSRV_OK on success, an INVALID_PARAM error for NULL inputs,
+ * or a specific error for an out-of-range config or heap index.
+ */
+PVRSRV_ERROR
+HeapCfgGetCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode,
+                    IMG_UINT32 uiHeapConfigIndex,
+                    IMG_UINT32 uiHeapIndex,
+                    PFN_HEAP_INIT *ppfnInit,
+                    PFN_HEAP_DEINIT *ppfnDeinit)
+{
+	DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+	PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+	PVR_LOG_RETURN_IF_INVALID_PARAM(ppfnInit, "ppfnInit");
+	PVR_LOG_RETURN_IF_INVALID_PARAM(ppfnDeinit, "ppfnDeinit");
+
+	/* Validate both indices before dereferencing the config array */
+	if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+	}
+
+	if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+	{
+		return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+	}
+
+	psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+	*ppfnInit = psHeapBlueprint->pfnInit;
+	*ppfnDeinit = psHeapBlueprint->pfnDeInit;
+
+	return PVRSRV_OK;
+}
+
PVRSRV_ERROR
HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
const PVRSRV_DEVICE_NODE *psDeviceNode,
#include "pvr_debug.h"
#include "devicemem_server.h"
#include "lock.h"
+#include "di_server.h"
#include "devicemem_history_server.h"
#include "pdump_km.h"
-#include "di_server.h"
-#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+#if (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES < 5000)
+#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too low.
+#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES > 250000)
+#error PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES is too high.
+#else
+#define ALLOCATION_LIST_NUM_ENTRIES PVRSRV_APPHINT_DEVMEM_HISTORY_MAX_ENTRIES
+#endif
+
/* data type to hold an allocation index.
* we make it 16 bits wide if possible
} COMMAND_WRAPPER;
/* target size for the circular buffer of commands */
-#define CIRCULAR_BUFFER_SIZE_KB 2048
+#if (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 < 5)
+#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too low.
+#elif (PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 > 18)
+#error PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2 is too high.
+#else
+#define CIRCULAR_BUFFER_SIZE_KB (1 << PVRSRV_APPHINT_DEVMEM_HISTORY_BUFSIZE_LOG2)
+#endif
+
+
/* turn the circular buffer target size into a number of commands */
#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
/* index value denoting the end of a list */
#define END_OF_LIST 0xFFFFFFFF
-#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx]))
+#define ALLOC_INDEX_TO_PTR(psDevHData, idx) (&((psDevHData)->sRecords.pasAllocations[idx]))
#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES)
/* wrapper structure for the allocation records and the commands circular buffer */
IMG_UINT32 ui32Head;
IMG_UINT32 ui32Tail;
COMMAND_WRAPPER *pasCircularBuffer;
+ /* Times the CB has wrapped back to start */
+ IMG_UINT64 ui64CBWrapCount;
+ /* Records of CB commands sent */
+ IMG_UINT64 ui64MapAllCount;//Incremented by InsertMapAllCommand()
+ IMG_UINT64 ui64UnMapAllCount;//Incremented by InsertUnmapAllCommand()
+ IMG_UINT64 ui64MapRangeCount;//Incremented by InsertMapRangeCommand()
+ IMG_UINT64 ui64UnMapRangeCount;//Incremented by InsertUnmapRangeCommand()
+ IMG_UINT64 ui64TimeStampCount;//Incremented by InsertTimeStampCommand()
} RECORDS;
typedef struct _DEVICEMEM_HISTORY_DATA_
{
- /* DI entry */
- DI_ENTRY *psDIEntry;
-
RECORDS sRecords;
POS_LOCK hLock;
} DEVICEMEM_HISTORY_DATA;
-static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;
+/* Maximum number of device instances supported. This should be DDK global */
+static DEVICEMEM_HISTORY_DATA *gapsDevicememHistoryData[PVRSRV_MAX_DEVICES] = { NULL };
+
+/* DevmemFindDataFromDev
+ *
+ * Return the address of the associated DEVICEMEM_HISTORY_DATA for the given
+ * device. If psDevNode associated unit is out of range we return NULL.
+ * NULL is also returned when no history data has yet been allocated for
+ * this unit (e.g. before device start-up completes).
+ */
+static DEVICEMEM_HISTORY_DATA *DevmemFindDataFromDev(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	DEVICEMEM_HISTORY_DATA *psDevmemData = NULL;
+
+	IMG_UINT32 uiUnit = psDevNode->sDevId.ui32InternalID;
+
+	/* Assert in debug builds, but also range-check at runtime so release
+	 * builds degrade to "no data" rather than indexing out of bounds */
+	PVR_ASSERT(uiUnit < PVRSRV_MAX_DEVICES);
+	if ((uiUnit < PVRSRV_MAX_DEVICES) && (gapsDevicememHistoryData[uiUnit] != NULL))
+	{
+		psDevmemData = gapsDevicememHistoryData[uiUnit];
+	}
+
+	return psDevmemData;
+}
/* gsDevicememHistoryData is static, hLock is NULL unless
* EnablePageFaultDebug is set and DevicememHistoryInitKM()
* was called.
*/
-static void DevicememHistoryLock(void)
+/* Acquire the given device's history lock. Deliberately a no-op when
+ * hLock is NULL, i.e. history tracking was never initialised. */
+static void DevicememHistoryLock(DEVICEMEM_HISTORY_DATA *psDevHData)
 {
-	if (gsDevicememHistoryData.hLock)
+	if (psDevHData->hLock)
 	{
-		OSLockAcquire(gsDevicememHistoryData.hLock);
+		OSLockAcquire(psDevHData->hLock);
 	}
 }
-static void DevicememHistoryUnlock(void)
+/* Release the given device's history lock; no-op when hLock is NULL
+ * (must mirror DevicememHistoryLock's behaviour). */
+static void DevicememHistoryUnlock(DEVICEMEM_HISTORY_DATA *psDevHData)
 {
-	if (gsDevicememHistoryData.hLock)
+	if (psDevHData->hLock)
 	{
-		OSLockRelease(gsDevicememHistoryData.hLock);
+		OSLockRelease(psDevHData->hLock);
 	}
 }
* move the circular buffer head along by one
* Returns a pointer to the acquired slot.
*/
-static COMMAND_WRAPPER *AcquireCBSlot(void)
+/* Return the current head slot of the device's command circular buffer
+ * and advance the head, counting wrap-arounds in ui64CBWrapCount.
+ * Caller must hold the history lock. The slot may contain a stale
+ * (overwritten) command. */
+static COMMAND_WRAPPER *AcquireCBSlot(DEVICEMEM_HISTORY_DATA *psDevHData)
 {
 	COMMAND_WRAPPER *psSlot;
-	psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+	psSlot = &psDevHData->sRecords.pasCircularBuffer[psDevHData->sRecords.ui32Head];
-	gsDevicememHistoryData.sRecords.ui32Head =
-		(gsDevicememHistoryData.sRecords.ui32Head + 1)
+	psDevHData->sRecords.ui32Head =
+		(psDevHData->sRecords.ui32Head + 1)
 		% CIRCULAR_BUFFER_NUM_COMMANDS;
+	/* Head returning to 0 means the buffer has wrapped once more */
+	if (psDevHData->sRecords.ui32Head == 0)
+	{
+		psDevHData->sRecords.ui64CBWrapCount++;
+	}
+
+	return psSlot;
 }
/* InsertTimeStampCommand:
* Insert a timestamp command into the circular buffer.
*/
-static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+/* Record a COMMAND_TYPE_TIMESTAMP entry (packed ui64Now) in the given
+ * device's history circular buffer. Silently does nothing when the
+ * device has no history data (e.g. not yet initialised). */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now, PVRSRV_DEVICE_NODE *psDevNode)
 {
 	COMMAND_WRAPPER *psCommand;
+	DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDevNode);
-	psCommand = AcquireCBSlot();
+	if (psDevHData == NULL)
+	{
+		return;
+	}
-	psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+	psCommand = AcquireCBSlot(psDevHData);
+	psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+	psDevHData->sRecords.ui64TimeStampCount++;
 	TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
 }
IMG_UINT32 ui32AllocIndex)
{
COMMAND_WRAPPER *psCommand;
+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode);
+
+ if (psDevHData == NULL)
+ {
+ return;
+ }
- psCommand = AcquireCBSlot();
+ psCommand = AcquireCBSlot(psDevHData);
psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+ psDevHData->sRecords.ui64MapAllCount++;
#if defined(PDUMP)
EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
-#else
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
#endif
}
IMG_UINT32 ui32AllocIndex)
{
COMMAND_WRAPPER *psCommand;
+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode);
- psCommand = AcquireCBSlot();
+ if (psDevHData == NULL)
+ {
+ return;
+ }
+
+ psCommand = AcquireCBSlot(psDevHData);
psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+ psDevHData->sRecords.ui64UnMapAllCount++;
#if defined(PDUMP)
EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
-#else
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
#endif
}
IMG_UINT32 ui32Count)
{
COMMAND_WRAPPER *psCommand;
+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode);
- psCommand = AcquireCBSlot();
+ if (psDevHData == NULL)
+ {
+ return;
+ }
+
+ psCommand = AcquireCBSlot(psDevHData);
psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+ psDevHData->sRecords.ui64MapRangeCount++;
MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
ui32AllocIndex,
ui32StartPage,
ui32Count);
-#else
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
#endif
}
IMG_UINT32 ui32Count)
{
COMMAND_WRAPPER *psCommand;
+ DEVICEMEM_HISTORY_DATA *psDevHData = DevmemFindDataFromDev(psDeviceNode);
- psCommand = AcquireCBSlot();
+ if (psDevHData == NULL)
+ {
+ return;
+ }
+
+ psCommand = AcquireCBSlot(psDevHData);
psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+ psDevHData->sRecords.ui64UnMapRangeCount++;
MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
ui32AllocIndex,
ui32StartPage,
ui32Count);
-#else
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
#endif
}
static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
{
RECORD_ALLOCATION *psAlloc;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
+
+ psDevHData = IMG_CONTAINER_OF(pui32ListHead, DEVICEMEM_HISTORY_DATA,
+ sRecords.ui32AllocationsListHead);
- psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc);
if (*pui32ListHead == END_OF_LIST)
{
RECORD_ALLOCATION *psHeadAlloc;
RECORD_ALLOCATION *psTailAlloc;
- psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
- psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+ psHeadAlloc = ALLOC_INDEX_TO_PTR(psDevHData, *pui32ListHead);
+ psTailAlloc = ALLOC_INDEX_TO_PTR(psDevHData, psHeadAlloc->ui32Prev);
/* make the new alloc point forwards to the previous head */
psAlloc->ui32Next = *pui32ListHead;
}
}
-static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+/* Insert allocation index ui32Alloc at the head of the device's
+ * busy (MRU) allocation list. */
+static void InsertAllocationToBusyList(DEVICEMEM_HISTORY_DATA *psDevHData,
+	IMG_UINT32 ui32Alloc)
 {
-	InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+	InsertAllocationToList(&psDevHData->sRecords.ui32AllocationsListHead, ui32Alloc);
 }
/* RemoveAllocationFromList:
{
RECORD_ALLOCATION *psAlloc;
- psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+ DEVICEMEM_HISTORY_DATA *psDevHData;
+
+ psDevHData = IMG_CONTAINER_OF(pui32ListHead, DEVICEMEM_HISTORY_DATA,
+ sRecords.ui32AllocationsListHead);
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc);
/* if this is the only element in the list then just make the list empty */
if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
{
RECORD_ALLOCATION *psPrev, *psNext;
- psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
- psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+ psPrev = ALLOC_INDEX_TO_PTR(psDevHData, psAlloc->ui32Prev);
+ psNext = ALLOC_INDEX_TO_PTR(psDevHData, psAlloc->ui32Next);
/* remove the allocation from the list */
psPrev->ui32Next = psAlloc->ui32Next;
}
}
-static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+/* Remove allocation index ui32Alloc from the device's busy (MRU)
+ * allocation list. */
+static void RemoveAllocationFromBusyList(DEVICEMEM_HISTORY_DATA *psDevHData, IMG_UINT32 ui32Alloc)
 {
-	RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+	RemoveAllocationFromList(&psDevHData->sRecords.ui32AllocationsListHead, ui32Alloc);
 }
/* TouchBusyAllocation:
* Move the given allocation to the head of the list
*/
-static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+/* Move the given allocation to the head of the device's MRU list by
+ * removing and re-inserting it. */
+static void TouchBusyAllocation(DEVICEMEM_HISTORY_DATA *psDevHData, IMG_UINT32 ui32Alloc)
 {
-	RemoveAllocationFromBusyList(ui32Alloc);
-	InsertAllocationToBusyList(ui32Alloc);
+	RemoveAllocationFromBusyList(psDevHData, ui32Alloc);
+	InsertAllocationToBusyList(psDevHData, ui32Alloc);
 }
/* GetOldestBusyAllocation:
* Returns the index of the oldest allocation in the MRU list
*/
-static IMG_UINT32 GetOldestBusyAllocation(void)
+/* Return the index of the oldest allocation in the device's MRU list
+ * (the predecessor of the list head, as the list is circular), or
+ * END_OF_LIST when the list is empty or psDevHData is NULL. */
+static IMG_UINT32 GetOldestBusyAllocation(DEVICEMEM_HISTORY_DATA *psDevHData)
 {
 	IMG_UINT32 ui32Alloc;
 	RECORD_ALLOCATION *psAlloc;
-	ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+	if (psDevHData != NULL)
+	{
+		ui32Alloc = psDevHData->sRecords.ui32AllocationsListHead;
+	}
+	else
+	{
+		ui32Alloc = END_OF_LIST; /* Default if no psDevHData */
+	}
 	if (ui32Alloc == END_OF_LIST)
 	{
 		return END_OF_LIST;
 	}
-	psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+	psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc);
 	return psAlloc->ui32Prev;
 }
-static IMG_UINT32 GetFreeAllocation(void)
+/* Pick a record slot to (re)use: currently recycles the oldest busy
+ * allocation, so the record list acts as an LRU cache. */
+static IMG_UINT32 GetFreeAllocation(DEVICEMEM_HISTORY_DATA *psDevHData)
 {
 	IMG_UINT32 ui32Alloc;
-	ui32Alloc = GetOldestBusyAllocation();
+	ui32Alloc = GetOldestBusyAllocation(psDevHData);
 	return ui32Alloc;
 }
IMG_UINT32 ui32Alloc;
RECORD_ALLOCATION *psAlloc;
- ui32Alloc = GetFreeAllocation();
+ DEVICEMEM_HISTORY_DATA *psDevHData;
- psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+ psDevHData = DevmemFindDataFromDev(psDeviceNode);
- InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ ui32Alloc = GetFreeAllocation(psDevHData);
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc);
+
+ InitialiseAllocation(ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc),
pszName,
ui64Serial,
uiPID,
ui32Log2PageSize);
/* put the newly initialised allocation at the front of the MRU list */
- TouchBusyAllocation(ui32Alloc);
+ TouchBusyAllocation(psDevHData, ui32Alloc);
*puiAllocationIndex = ui32Alloc;
#if defined(PDUMP)
EmitPDumpAllocation(psDeviceNode, ui32Alloc, psAlloc);
-#else
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
#endif
return PVRSRV_OK;
* Tests if the allocation at the given index matches the supplied properties.
* Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
*/
-static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+static IMG_BOOL MatchAllocation(DEVICEMEM_HISTORY_DATA *psDevHData,
+ IMG_UINT32 ui32AllocationIndex,
IMG_UINT64 ui64Serial,
IMG_DEV_VIRTADDR sDevVAddr,
IMG_DEVMEM_SIZE_T uiSize,
{
RECORD_ALLOCATION *psAlloc;
- psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+ if (psDevHData == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocationIndex);
return (psAlloc->ui64Serial == ui64Serial) &&
(psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
* if the caller provided a hint but the allocation record is no longer
* there, it must have been purged, so go ahead and create a new allocation
*/
- bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+ bHaveAllocation = MatchAllocation(DevmemFindDataFromDev(psDeviceNode),
+ ui32AllocationIndexHint,
ui64Serial,
sDevVAddr,
uiSize,
if (ui32DonePages == ui32NumPages)
{
- break;
+ break;
}
bInARun = IMG_FALSE;
IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
PVRSRV_ERROR eError;
IMG_BOOL bCreated;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
!CHECK_ALLOC_INDEX(ui32AllocationIndex))
PMRGetUID(psPMR, &ui64Serial);
- DevicememHistoryLock();
+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR));
+
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock(psDevHData);
eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
ui32AllocationIndex,
if ((eError == PVRSRV_OK) && !bCreated)
{
/* touch the allocation so it goes to the head of our MRU list */
- TouchBusyAllocation(ui32AllocationIndex);
+ TouchBusyAllocation(psDevHData, ui32AllocationIndex);
}
else if (eError != PVRSRV_OK)
{
IMG_TRUE);
}
- InsertTimeStampCommand(OSClockns64());
+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR));
*pui32AllocationIndexOut = ui32AllocationIndex;
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return eError;
}
IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
PVRSRV_ERROR eError;
IMG_BOOL bCreated;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
PVR_UNREFERENCED_PARAMETER(psConnection);
return PVRSRV_ERROR_INVALID_PARAMS;
}
- DevicememHistoryLock();
+ psDevHData = DevmemFindDataFromDev(psDeviceNode);
+
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock(psDevHData);
eError = FindOrCreateAllocation(psDeviceNode,
ui32AllocationIndex,
if ((eError == PVRSRV_OK) && !bCreated)
{
/* touch the allocation so it goes to the head of our MRU list */
- TouchBusyAllocation(ui32AllocationIndex);
+ TouchBusyAllocation(psDevHData, ui32AllocationIndex);
}
else if (eError != PVRSRV_OK)
{
*pui32AllocationIndexOut = ui32AllocationIndex;
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return eError;
IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
PVRSRV_ERROR eError;
IMG_BOOL bCreated;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
PVR_UNREFERENCED_PARAMETER(psConnection);
return PVRSRV_ERROR_INVALID_PARAMS;
}
- DevicememHistoryLock();
+ psDevHData = DevmemFindDataFromDev(psDeviceNode);
+
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock(psDevHData);
eError = FindOrCreateAllocation(psDeviceNode,
ui32AllocationIndex,
if ((eError == PVRSRV_OK) && !bCreated)
{
/* touch the allocation so it goes to the head of our MRU list */
- TouchBusyAllocation(ui32AllocationIndex);
+ TouchBusyAllocation(psDevHData, ui32AllocationIndex);
}
else if (eError != PVRSRV_OK)
{
*pui32AllocationIndexOut = ui32AllocationIndex;
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return eError;
}
IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
PVRSRV_ERROR eError;
IMG_BOOL bCreated;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
!CHECK_ALLOC_INDEX(ui32AllocationIndex))
PMRGetUID(psPMR, &ui64Serial);
- DevicememHistoryLock();
+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR));
+
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock(psDevHData);
eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
ui32AllocationIndex,
if ((eError == PVRSRV_OK) && !bCreated)
{
/* touch the allocation so it goes to the head of our MRU list */
- TouchBusyAllocation(ui32AllocationIndex);
+ TouchBusyAllocation(psDevHData, ui32AllocationIndex);
}
else if (eError != PVRSRV_OK)
{
IMG_FALSE);
}
- InsertTimeStampCommand(OSClockns64());
+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR));
*pui32AllocationIndexOut = ui32AllocationIndex;
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return eError;
}
IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
PVRSRV_ERROR eError;
IMG_BOOL bCreated;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
+
+ if (!PMRValidateSize((IMG_UINT64) ui32AllocPageCount << ui32Log2PageSize))
+ {
+ PVR_LOG_VA(PVR_DBG_ERROR,
+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"",
+ ui32AllocPageCount,
+ (IMG_UINT64) 1ULL << ui32Log2PageSize);
+ return PVRSRV_ERROR_PMR_TOO_LARGE;
+ }
if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
!CHECK_ALLOC_INDEX(ui32AllocationIndex))
PMRGetUID(psPMR, &ui64Serial);
- DevicememHistoryLock();
+ psDevHData = DevmemFindDataFromDev(PMR_DeviceNode(psPMR));
+
+ if (psDevHData == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock(psDevHData);
eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
ui32AllocationIndex,
if ((eError == PVRSRV_OK) && !bCreated)
{
/* touch the allocation so it goes to the head of our MRU list */
- TouchBusyAllocation(ui32AllocationIndex);
+ TouchBusyAllocation(psDevHData, ui32AllocationIndex);
}
else if (eError != PVRSRV_OK)
{
ui32AllocationIndex,
IMG_FALSE);
- InsertTimeStampCommand(OSClockns64());
+ InsertTimeStampCommand(OSClockns64(), PMR_DeviceNode(psPMR));
*pui32AllocationIndexOut = ui32AllocationIndex;
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return eError;
/* CircularBufferIterateStart:
* Initialise local state for iterating over the circular buffer
*/
-static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+static void CircularBufferIterateStart(DEVICEMEM_HISTORY_DATA *psHData, IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
{
- *pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+ *pui32Head = psHData->sRecords.ui32Head;
if (*pui32Head != 0)
{
* Iterate to the previous item in the circular buffer.
* This is called repeatedly to iterate over the whole circular buffer.
*/
-static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(DEVICEMEM_HISTORY_DATA *psHData,
+ IMG_UINT32 ui32Head,
IMG_UINT32 *pui32Iter,
COMMAND_TYPE *peType,
IMG_BOOL *pbLast)
IMG_UINT8 *pui8Header;
COMMAND_WRAPPER *psOut = NULL;
- psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+ psOut = psHData->sRecords.pasCircularBuffer + *pui32Iter;
pui8Header = (void *) psOut;
* Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
* MAP_RANGE or UNMAP_RANGE command
*/
-static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+static void MapUnmapCommandGetInfo(DEVICEMEM_HISTORY_DATA *psHData,
+ COMMAND_WRAPPER *psCommand,
COMMAND_TYPE eType,
IMG_DEV_VIRTADDR *psDevVAddrStart,
IMG_DEV_VIRTADDR *psDevVAddrEnd,
*pbMap = (eType == COMMAND_TYPE_MAP_ALL);
*pui32AllocIndex = psMapAll->uiAllocIndex;
- psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+ psAlloc = ALLOC_INDEX_TO_PTR(psHData, psMapAll->uiAllocIndex);
*psDevVAddrStart = psAlloc->sDevVAddr;
psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
*pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
*pui32AllocIndex = psMapRange->uiAllocIndex;
- psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+ psAlloc = ALLOC_INDEX_TO_PTR(psHData, psMapRange->uiAllocIndex);
MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
}
}
+/* Emit a one-line summary of the device's history-record counters via the
+ * supplied debug-dump printf. Abbreviations map to the RECORDS fields:
+ *   CBWC = circular-buffer wrap count, MAC/UMAC = map-all/unmap-all count,
+ *   MRC/UMRC = map-range/unmap-range count, TSC = timestamp count,
+ *   MAX = circular-buffer capacity, CHD = current head index.
+ * Prints "None" when the device has no history data allocated.
+ */
+void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode,
+	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+	void *pvDumpDebugFile)
+{
+	DEVICEMEM_HISTORY_DATA *psDevHData;
+	psDevHData = DevmemFindDataFromDev(psDevNode);
+
+	if (psDevHData)
+	{
+		PVR_DUMPDEBUG_LOG("  DevmemHistoryRecordStats -"
+		                  " CBWC:%"IMG_UINT64_FMTSPEC
+		                  " MAC:%"IMG_UINT64_FMTSPEC
+		                  " UMAC:%"IMG_UINT64_FMTSPEC
+		                  " MRC:%"IMG_UINT64_FMTSPEC
+		                  " UMRC:%"IMG_UINT64_FMTSPEC
+		                  " TSC:%"IMG_UINT64_FMTSPEC
+		                  " MAX:%"IMG_UINT64_FMTSPEC
+		                  " CHD:%u",
+		                  psDevHData->sRecords.ui64CBWrapCount,
+		                  psDevHData->sRecords.ui64MapAllCount,
+		                  psDevHData->sRecords.ui64UnMapAllCount,
+		                  psDevHData->sRecords.ui64MapRangeCount,
+		                  psDevHData->sRecords.ui64UnMapRangeCount,
+		                  psDevHData->sRecords.ui64TimeStampCount,
+		                  (IMG_UINT64)CIRCULAR_BUFFER_NUM_COMMANDS,
+		                  psDevHData->sRecords.ui32Head);
+	}
+	else
+	{
+		PVR_DUMPDEBUG_LOG("  DevmemHistoryRecordStats - None");
+	}
+}
+
/* DevicememHistoryQuery:
* Entry point for rgxdebug to look up addresses relating to a page fault
*/
IMG_BOOL bLast = IMG_FALSE;
IMG_UINT64 ui64StartTime = OSClockns64();
IMG_UINT64 ui64TimeNs = 0;
+ DEVICEMEM_HISTORY_DATA *psDevHData;
/* initialise the results count for the caller */
psQueryOut->ui32NumResults = 0;
+ psQueryOut->ui64SearchCount = 0;
+
+ psDevHData = DevmemFindDataFromDev(psQueryIn->psDevNode);
+
+ if (psDevHData == NULL)
+ {
+ return IMG_FALSE;
+ }
- DevicememHistoryLock();
+ DevicememHistoryLock(psDevHData);
/* if the search is constrained to a particular PID then we
* first search the list of allocations to see if this
if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
{
IMG_UINT32 ui32Alloc;
- ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+ ui32Alloc = psDevHData->sRecords.ui32AllocationsListHead;
while (ui32Alloc != END_OF_LIST)
{
RECORD_ALLOCATION *psAlloc;
- psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32Alloc);
if (psAlloc->uiPID == psQueryIn->uiPID)
{
goto found_pid;
}
- if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+ if (ui32Alloc == psDevHData->sRecords.ui32AllocationsListHead)
{
/* gone through whole list */
break;
found_pid:
- CircularBufferIterateStart(&ui32Head, &ui32Iter);
+ CircularBufferIterateStart(psDevHData, &ui32Head, &ui32Iter);
while (!bLast)
{
- psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+ psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter, &eType, &bLast);
if (eType == COMMAND_TYPE_TIMESTAMP)
{
IMG_BOOL bMap;
IMG_UINT32 ui32AllocIndex;
- MapUnmapCommandGetInfo(psCommand,
+ MapUnmapCommandGetInfo(psDevHData,
+ psCommand,
eType,
&sAllocStartAddrOrig,
&sAllocEndAddrOrig,
sAllocStartAddr = sAllocStartAddrOrig;
sAllocEndAddr = sAllocEndAddrOrig;
- psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocIndex);
/* skip this command if we need to search within
* a particular PID, and this allocation is not from
continue;
}
+ psQueryOut->ui64SearchCount++;
+
/* if the caller wants us to match any allocation in the
* same page as the allocation then tweak the real start/end
* addresses of the allocation here
if (bMatchAnyAllocInPage)
{
sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
- sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+ sAllocEndAddr.uiAddr = PVR_ALIGN(sAllocEndAddr.uiAddr, ui32PageSizeBytes);
}
if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
}
out_unlock:
- DevicememHistoryUnlock();
+ DevicememHistoryUnlock(psDevHData);
return psQueryOut->ui32NumResults > 0;
}
+#if defined(SUPPORT_RGX)
static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
IMG_PID uiPID,
const IMG_CHAR *pszName,
IMG_BOOL bLast = IMG_FALSE;
IMG_UINT64 ui64TimeNs = 0;
IMG_UINT64 ui64StartTime = OSClockns64();
+ DEVICEMEM_HISTORY_DATA *psDevHData;
+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
DeviceMemHistoryFmtHeader(szBuffer);
DIPrintf(psEntry, "%s\n", szBuffer);
- CircularBufferIterateStart(&ui32Head, &ui32Iter);
+ psDevHData = DevmemFindDataFromDev(psDeviceNode);
+
+ if (psDevHData == NULL)
+ {
+ return;
+ }
+
+ CircularBufferIterateStart(psDevHData, &ui32Head, &ui32Iter);
while (!bLast)
{
COMMAND_WRAPPER *psCommand;
COMMAND_TYPE eType = COMMAND_TYPE_NONE;
- psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType,
- &bLast);
+ psCommand = CircularBufferIteratePrevious(psDevHData, ui32Head, &ui32Iter,
+ &eType, &bLast);
if (eType == COMMAND_TYPE_TIMESTAMP)
{
IMG_BOOL bMap;
IMG_UINT32 ui32AllocIndex;
- MapUnmapCommandGetInfo(psCommand,
+ MapUnmapCommandGetInfo(psDevHData,
+ psCommand,
eType,
&sDevVAddrStart,
&sDevVAddrEnd,
&bMap,
&ui32AllocIndex);
- psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+ psAlloc = ALLOC_INDEX_TO_PTR(psDevHData, ui32AllocIndex);
if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
{
static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry,
                                           void *pvData)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)DIGetPrivData(psEntry);
	DEVICEMEM_HISTORY_DATA *psDevHData;

	PVR_UNREFERENCED_PARAMETER(pvData);

	/* Get the backing store associated with the device. If we are
	 * called before the device has been started (i.e. FW loaded)
	 * then we haven't yet had this data allocated.
	 * Return to provide a NULL data stream to the consumer.
	 */
	psDevHData = DevmemFindDataFromDev(psDeviceNode);
	if (psDevHData == NULL)
	{
		return 0;
	}

	/* Dump the whole history under the per-device lock. */
	DevicememHistoryLock(psDevHData);
	DevicememHistoryPrintAll(psEntry);
	DevicememHistoryUnlock(psDevHData);

	return 0;
}
+#endif /* defined(SUPPORT_RGX) */
-static PVRSRV_ERROR CreateRecords(void)
+static PVRSRV_ERROR CreateRecords(DEVICEMEM_HISTORY_DATA *psDevHData)
{
- gsDevicememHistoryData.sRecords.pasAllocations =
- OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+ psDevHData->sRecords.pasAllocations =
+ OSAllocMemNoStats(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
- PVR_RETURN_IF_NOMEM(gsDevicememHistoryData.sRecords.pasAllocations);
+ PVR_RETURN_IF_NOMEM(psDevHData->sRecords.pasAllocations);
/* Allocated and initialise the circular buffer with zeros so every
* command is initialised as a command of type COMMAND_TYPE_NONE. */
- gsDevicememHistoryData.sRecords.pasCircularBuffer =
- OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+ psDevHData->sRecords.pasCircularBuffer =
+ OSAllocZMemNoStats(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
- if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+ if (psDevHData->sRecords.pasCircularBuffer == NULL)
{
- OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+ OSFreeMemNoStats(psDevHData->sRecords.pasAllocations);
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
return PVRSRV_OK;
}
-static void DestroyRecords(void)
+static void DestroyRecords(DEVICEMEM_HISTORY_DATA *psDevHData)
{
- OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
- OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+ OSFreeMemNoStats(psDevHData->sRecords.pasCircularBuffer);
+ OSFreeMemNoStats(psDevHData->sRecords.pasAllocations);
}
-static void InitialiseRecords(void)
+static void InitialiseRecords(DEVICEMEM_HISTORY_DATA *psDevHData)
{
IMG_UINT32 i;
/* initialise the allocations list */
- gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
- gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+ psDevHData->sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+ psDevHData->sRecords.pasAllocations[0].ui32Next = 1;
for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
{
- gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
- gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+ psDevHData->sRecords.pasAllocations[i].ui32Prev = i - 1;
+ psDevHData->sRecords.pasAllocations[i].ui32Next = i + 1;
}
- gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+ psDevHData->sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
- gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+ psDevHData->sRecords.ui32AllocationsListHead = 0;
}
+static void DevicememHistoryDevDeInitUnit(IMG_UINT32 uiUnit);
+static PVRSRV_ERROR DevicememHistoryDevInitUnit(IMG_UINT32 uiUnit);
+
PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+ IMG_UINT32 ui;
+
+ /* Zero-fill the gapsDevicememHistoryData array entries */
+ for (ui = 0; ui < PVRSRV_MAX_DEVICES; ui++)
+ {
+ gapsDevicememHistoryData[ui] = NULL;
+ }
+
+ return PVRSRV_OK;
+}
+
+void DevicememHistoryDeInitKM(void)
+{
+ IMG_UINT32 uiUnit;
+
+ /* Iterate over all potential units and remove their data.
+ * DI entry is removed by DevicememHistoryDeviceDestroy()
+ */
+ for (uiUnit = 0; uiUnit < PVRSRV_MAX_DEVICES; uiUnit++)
+ {
+ DevicememHistoryDevDeInitUnit(uiUnit);
+ }
+}
+
+/* Allocate DEVICEMEM_HISTORY_DATA entry for the specified unit */
+static PVRSRV_ERROR DevicememHistoryDevInitUnit(IMG_UINT32 uiUnit)
{
PVRSRV_ERROR eError;
- DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper};
+ DEVICEMEM_HISTORY_DATA *psDevicememHistoryData;
+
+ if (uiUnit >= PVRSRV_MAX_DEVICES)
+ {
+ PVR_LOG_RETURN_IF_FALSE(uiUnit < PVRSRV_MAX_DEVICES, "Invalid Unit",
+ PVRSRV_ERROR_INVALID_PARAMS);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
- eError = OSLockCreate(&gsDevicememHistoryData.hLock);
+ /* Valid unit, try and allocate and fill all structure members */
+
+ psDevicememHistoryData = OSAllocZMemNoStats(sizeof(DEVICEMEM_HISTORY_DATA));
+ PVR_RETURN_IF_NOMEM(psDevicememHistoryData);
+
+ eError = OSLockCreate(&psDevicememHistoryData->hLock);
PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock);
- eError = CreateRecords();
+ eError = CreateRecords(psDevicememHistoryData);
PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations);
- InitialiseRecords();
+ InitialiseRecords(psDevicememHistoryData);
- eError = DICreateEntry("devicemem_history", NULL, &sIterator, NULL,
- DI_ENTRY_TYPE_GENERIC,
- &gsDevicememHistoryData.psDIEntry);
- PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", err_di_creation);
+ gapsDevicememHistoryData[uiUnit] = psDevicememHistoryData;
return PVRSRV_OK;
-err_di_creation:
- DestroyRecords();
err_allocations:
- OSLockDestroy(gsDevicememHistoryData.hLock);
- gsDevicememHistoryData.hLock = NULL;
+ OSLockDestroy(psDevicememHistoryData->hLock);
+ psDevicememHistoryData->hLock = NULL;
err_lock:
+ OSFreeMemNoStats(psDevicememHistoryData);
return eError;
}
-void DevicememHistoryDeInitKM(void)
+/* Allocate DI entry for specified psDeviceNode */
+PVRSRV_ERROR DevicememHistoryDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiUnit = psDeviceNode->sDevId.ui32InternalID;
+#if defined(SUPPORT_RGX)
+ PVRSRV_DEVICE_DEBUG_INFO *psDevDebugInfo = &psDeviceNode->sDebugInfo;
+ DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper};
+#endif
+
+ if (uiUnit >= PVRSRV_MAX_DEVICES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Create the DI entry for the device's devicemem_history handle */
+
+
+#if defined(SUPPORT_RGX)
+ eError = DICreateEntry("devicemem_history", psDevDebugInfo->psGroup,
+ &sIterator, psDeviceNode,
+ DI_ENTRY_TYPE_GENERIC,
+ &psDevDebugInfo->psDevMemEntry);
+#endif /* defined(SUPPORT_RGX) */
+
+ return eError;
+}
+
+/* Allocate the DEVICEMEM_HISTORY_DATA for specified psDeviceNode */
+PVRSRV_ERROR DevicememHistoryDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiUnit = psDeviceNode->sDevId.ui32InternalID;
+
+ if (uiUnit >= PVRSRV_MAX_DEVICES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = DevicememHistoryDevInitUnit(uiUnit);
+
+ return eError;
+}
+
+static void DevicememHistoryDevDeInitUnit(IMG_UINT32 uiUnit)
{
- if (gsDevicememHistoryData.psDIEntry != NULL)
+ DEVICEMEM_HISTORY_DATA *psDevicememHistoryData;
+
+ if (uiUnit >= PVRSRV_MAX_DEVICES)
{
- DIDestroyEntry(gsDevicememHistoryData.psDIEntry);
+ return;
}
- DestroyRecords();
+ psDevicememHistoryData = gapsDevicememHistoryData[uiUnit];
- if (gsDevicememHistoryData.hLock != NULL)
+ if (psDevicememHistoryData == NULL)
{
- OSLockDestroy(gsDevicememHistoryData.hLock);
- gsDevicememHistoryData.hLock = NULL;
+ return;
+ }
+
+ DestroyRecords(psDevicememHistoryData);
+
+ if (psDevicememHistoryData->hLock != NULL)
+ {
+ OSLockDestroy(psDevicememHistoryData->hLock);
+ psDevicememHistoryData->hLock = NULL;
}
+
+ OSFreeMemNoStats(psDevicememHistoryData);
+ gapsDevicememHistoryData[uiUnit] = NULL;
+}
+
+void DevicememHistoryDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_RGX)
+ PVRSRV_DEVICE_DEBUG_INFO *psDevDebugInfo = &psDeviceNode->sDebugInfo;
+
+ /* Remove the DI entry associated with this device */
+ DIDestroyEntry(psDevDebugInfo->psDevMemEntry);
+#endif /* defined(SUPPORT_RGX) */
+
}
#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0)
#define DEVMEMHEAP_REFCOUNT_MIN 1
#define DEVMEMHEAP_REFCOUNT_MAX IMG_INT32_MAX
+#define DEVMEMRESERVATION_REFCOUNT_MIN 0
+#define DEVMEMRESERVATION_REFCOUNT_MAX IMG_INT32_MAX
struct _DEVMEMINT_CTX_
{
{
struct _DEVMEMINT_CTX_ *psDevmemCtx;
IMG_UINT32 uiLog2PageSize;
+ IMG_DEV_VIRTADDR sBaseAddr;
ATOMIC_T uiRefCount;
+
+ /* Private data for callback functions */
+ IMG_HANDLE hPrivData;
+
+ /* Callback function init */
+ PFN_HEAP_INIT pfnInit;
+
+ /* Callback function deinit */
+ PFN_HEAP_DEINIT pfnDeInit;
};
struct _DEVMEMINT_RESERVATION_
struct _DEVMEMINT_HEAP_ *psDevmemHeap;
IMG_DEV_VIRTADDR sBase;
IMG_DEVMEM_SIZE_T uiLength;
+ /* lock used to guard against potential race when freeing reservation */
+ POS_LOCK hLock;
+ IMG_INT32 i32RefCount;
};
struct _DEVMEMINT_MAPPING_
IMG_UINT32 uiNumPages;
};
/*! Object representing a virtual range reservation and mapping between
 * the virtual range and a set of PMRs.
 *
 * The physical allocations may be mapped entirely or partially to the entire
 * or partial virtual range. */
struct _DEVMEMXINT_RESERVATION_
{
	/*! Pointer to a device memory heap this reservation is made on. */
	struct _DEVMEMINT_HEAP_ *psDevmemHeap;
	/*! Base device virtual address of this reservation. */
	IMG_DEV_VIRTADDR sBase;
	/*! Size of this reservation (in bytes). */
	IMG_DEVMEM_SIZE_T uiLength;
	/*! Lock for protecting concurrent operations on the mapping. */
	POS_LOCK hLock;
	/*! Array of PMR pointers, one entry per device page of the reservation
	 * (uiLength >> uiLog2PageSize entries; allocated in the same block as
	 * this structure). The array records how physical memory is mapped to
	 * the virtual range: each entry corresponds to one device page, so a
	 * single PMR may be referenced by many consecutive indices. NULL means
	 * the page is currently unmapped. */
	PMR **ppsPMR;
};
+
struct _DEVMEMINT_PF_NOTIFY_
{
IMG_UINT32 ui32PID;
}
}
-PVRSRV_ERROR
-DevmemIntUnpin(PMR *psPMR)
-{
- PVRSRV_ERROR eError;
-
- /* Unpin */
- eError = PMRUnpinPMR(psPMR, IMG_FALSE);
-
- return eError;
-}
-
-PVRSRV_ERROR
-DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+/*************************************************************************/ /*!
+@Function DevmemIntReservationAcquire
+@Description Acquire a reference to the provided device memory reservation.
+@Return IMG_TRUE if referenced and IMG_FALSE in case of error
+*/ /**************************************************************************/
+static INLINE IMG_BOOL DevmemIntReservationAcquire(DEVMEMINT_RESERVATION *psDevmemReservation)
{
- PVRSRV_ERROR eError;
+ IMG_BOOL bSuccess;
- eError = PMRUnpinPMR(psPMR, IMG_TRUE);
- PVR_GOTO_IF_ERROR(eError, e_exit);
+ OSLockAcquire(psDevmemReservation->hLock);
- /* Invalidate mapping */
- eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
- psDevmemMapping->psReservation->sBase,
- psDevmemMapping->uiNumPages,
- psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
- IMG_FALSE, /* !< Choose to invalidate PT entries */
- psPMR);
+ bSuccess = (psDevmemReservation->i32RefCount < DEVMEMRESERVATION_REFCOUNT_MAX);
-e_exit:
- return eError;
-}
-
-PVRSRV_ERROR
-DevmemIntPin(PMR *psPMR)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- /* Start the pinning */
- eError = PMRPinPMR(psPMR);
+ if (!bSuccess)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory "
+ "reservation, reference count has overflowed.", __func__));
+ }
+ else
+ {
+ psDevmemReservation->i32RefCount++;
+ }
- return eError;
+ OSLockRelease(psDevmemReservation->hLock);
+ return bSuccess;
}
-PVRSRV_ERROR
-DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+/*************************************************************************/ /*!
+@Function DevmemIntReservationRelease
+@Description Release the reference to the provided device memory reservation.
+ If this is the last reference which was taken then the
+ reservation will be freed.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void DevmemIntReservationRelease(DEVMEMINT_RESERVATION *psDevmemReservation)
{
- PVRSRV_ERROR eError;
- PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
- IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
-
- /* Start the pinning */
- eError = PMRPinPMR(psPMR);
+ OSLockAcquire(psDevmemReservation->hLock);
- if (eError == PVRSRV_OK)
+ if (psDevmemReservation->i32RefCount == DEVMEMRESERVATION_REFCOUNT_MIN)
{
- /* Make mapping valid again */
- eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
- psDevmemMapping->psReservation->sBase,
- psDevmemMapping->uiNumPages,
- uiLog2PageSize,
- IMG_TRUE, /* !< Choose to make PT entries valid again */
- psPMR);
+ PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to release the device memory "
+ "reservation, reference count has underflowed.", __func__));
}
- else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
- {
- /* If we lost the physical backing we have to map it again because
- * the old physical addresses are not valid anymore. */
- PMR_FLAGS_T uiFlags;
- uiFlags = PMR_Flags(psPMR);
-
- eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
- uiFlags,
- psDevmemMapping->psReservation->sBase,
- psPMR,
- 0,
- psDevmemMapping->uiNumPages,
- NULL,
- uiLog2PageSize);
- }
-
- /* Just overwrite eError if the mappings failed.
- * PMR_NEW_MEMORY has to be propagated to the user. */
- if (eErrorMMU != PVRSRV_OK)
+ else
{
- eError = eErrorMMU;
+ /* Decrement reservation reference count and free it
+ * if this was the final reference
+ */
+ if (--psDevmemReservation->i32RefCount == DEVMEMRESERVATION_REFCOUNT_MIN)
+ {
+ /* Destroy lock */
+ OSLockRelease(psDevmemReservation->hLock);
+ OSLockDestroy(psDevmemReservation->hLock);
+ OSFreeMem(psDevmemReservation);
+ goto exit_noderef;
+ }
}
- return eError;
+ OSLockRelease(psDevmemReservation->hLock);
+exit_noderef:
+ return;
}
/*************************************************************************/ /*!
PVRSRV_ERROR
DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_BOOL bKernelMemoryCtx,
+ IMG_BOOL bKernelFWMemoryCtx,
DEVMEMINT_CTX **ppsDevmemCtxPtr,
IMG_HANDLE *hPrivData,
IMG_UINT32 *pui32CPUCacheLineSize)
DEVMEMINT_CTX *psDevmemCtx;
IMG_HANDLE hPrivDataInt = NULL;
MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode,
- bKernelMemoryCtx);
+ bKernelFWMemoryCtx);
PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
+ /* Only allow request for a kernel context that comes from a direct bridge
+ * (psConnection == NULL). Only the FW/KM Ctx is created over the direct bridge. */
+ PVR_LOG_RETURN_IF_INVALID_PARAM(!bKernelFWMemoryCtx || psConnection == NULL,
+ "bKernelFWMemoryCtx && psConnection");
+
/*
* Ensure that we are safe to perform unaligned accesses on memory
* we mark write-combine, as the compiler might generate
*/ /**************************************************************************/
PVRSRV_ERROR
DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
IMG_DEV_VIRTADDR sHeapBaseAddr,
- IMG_DEVMEM_SIZE_T uiHeapLength,
IMG_UINT32 uiLog2DataPageSize,
DEVMEMINT_HEAP **ppsDevmemHeapPtr)
{
DEVMEMINT_HEAP *psDevmemHeap;
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sBlueprintHeapBaseAddr;
+ IMG_DEVMEM_SIZE_T uiBlueprintHeapLength;
+ IMG_DEVMEM_SIZE_T uiBlueprintResRgnLength;
+ IMG_UINT32 ui32BlueprintLog2DataPageSize;
+ IMG_UINT32 ui32BlueprintLog2ImportAlignment;
PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
OSAtomicWrite(&psDevmemHeap->uiRefCount, 1);
+ /* Check page size and base addr match the heap blueprint */
+ eError = HeapCfgHeapDetails(NULL,
+ psDevmemHeap->psDevmemCtx->psDevNode,
+ uiHeapConfigIndex,
+ uiHeapIndex,
+ 0, NULL,
+ &sBlueprintHeapBaseAddr,
+ &uiBlueprintHeapLength,
+ &uiBlueprintResRgnLength,
+ &ui32BlueprintLog2DataPageSize,
+ &ui32BlueprintLog2ImportAlignment);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get details for HeapConfig:%d HeapIndex:%d.",
+ __func__, uiHeapConfigIndex, uiHeapIndex));
+ goto ErrorCtxRelease;
+ }
+ PVR_ASSERT(uiLog2DataPageSize == ui32BlueprintLog2DataPageSize);
+ PVR_ASSERT(sHeapBaseAddr.uiAddr == sBlueprintHeapBaseAddr.uiAddr);
+
psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+ psDevmemHeap->sBaseAddr = sHeapBaseAddr;
+
+ eError = HeapCfgGetCallbacks(psDevmemHeap->psDevmemCtx->psDevNode,
+ uiHeapConfigIndex,
+ uiHeapIndex,
+ &psDevmemHeap->pfnInit,
+ &psDevmemHeap->pfnDeInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get callbacks for HeapConfig:%d HeapIndex:%d.",
+ __func__, uiHeapConfigIndex, uiHeapIndex));
+ goto ErrorCtxRelease;
+ }
+
+ if (psDevmemHeap->pfnInit != NULL)
+ {
+ eError = psDevmemHeap->pfnInit(psDevmemHeap->psDevmemCtx->psDevNode,
+ psDevmemHeap,
+ &psDevmemHeap->hPrivData);
+ PVR_GOTO_IF_ERROR(eError, ErrorCtxRelease);
+ }
*ppsDevmemHeapPtr = psDevmemHeap;
return PVRSRV_OK;
+
+ErrorCtxRelease:
+ DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+ OSFreeMem(psDevmemHeap);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psReservation);
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(ui32PageCount);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalPgOffset);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(sDevVAddrBase);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
}
-PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_DEF_PAGE *psDefPage,
- IMG_INT uiInitValue,
- IMG_CHAR *pcDefPageName,
- IMG_BOOL bInitPage)
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount)
{
- IMG_UINT32 ui32RefCnt;
- PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_UNREFERENCED_PARAMETER(psReservation);
+ PVR_UNREFERENCED_PARAMETER(sDevVAddrBase);
+ PVR_UNREFERENCED_PARAMETER(ui32PageCount);
- OSLockAcquire(psDefPage->psPgLock);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
- /* We know there will not be 4G number of sparse PMR's */
- ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter);
+static INLINE IMG_UINT32
+_ReservationPageCount(DEVMEMXINT_RESERVATION *psRsrv)
+{
+ return psRsrv->uiLength >> psRsrv->psDevmemHeap->uiLog2PageSize;
+}
- if (1 == ui32RefCnt)
- {
- IMG_DEV_PHYADDR sDevPhysAddr = {0};
+static INLINE IMG_DEV_VIRTADDR
+_ReservationPageAddress(DEVMEMXINT_RESERVATION *psRsrv, IMG_UINT32 uiVirtPageOffset)
+{
+ IMG_DEV_VIRTADDR sAddr = {
+ .uiAddr = psRsrv->sBase.uiAddr + (uiVirtPageOffset << psRsrv->psDevmemHeap->uiLog2PageSize)
+ };
-#if defined(PDUMP)
- PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName);
-#endif
+ return sAddr;
+}
- /* Allocate the dummy page required for sparse backing */
- eError = DevPhysMemAlloc(psDevNode,
- (1 << psDefPage->ui32Log2PgSize),
- 0,
- uiInitValue,
- bInitPage,
-#if defined(PDUMP)
- psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
- pcDefPageName,
- &psDefPage->hPdumpPg,
-#endif
- &psDefPage->sPageHandle,
- &sDevPhysAddr);
- if (PVRSRV_OK != eError)
- {
- OSAtomicDecrement(&psDefPage->atRefCounter);
- }
- else
- {
- psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr;
- }
+PVRSRV_ERROR
+DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMXINT_RESERVATION **ppsRsrv)
+{
+ DEVMEMXINT_RESERVATION *psRsrv;
+ IMG_UINT32 uiNumPages;
+ PVRSRV_ERROR eError;
+
+ if (!DevmemIntHeapAcquire(psDevmemHeap))
+ {
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError);
}
- OSLockRelease(psDefPage->psPgLock);
+ /* align address to full device page size */
+ uiAllocationSize = PVR_ALIGN(uiAllocationSize, IMG_UINT64_C(1) << psDevmemHeap->uiLog2PageSize);
+ uiNumPages = uiAllocationSize >> psDevmemHeap->uiLog2PageSize;
+
+ PVR_LOG_GOTO_IF_INVALID_PARAM(uiNumPages <= PMR_MAX_SUPPORTED_4K_PAGE_COUNT, eError,
+ ErrorUnreferenceHeap);
+
+ psRsrv = OSAllocZMem(sizeof(*psRsrv->ppsPMR) * uiNumPages + sizeof(*psRsrv));
+ PVR_LOG_GOTO_IF_NOMEM(psRsrv, eError, ErrorUnreferenceHeap);
+
+ eError = OSLockCreate(&psRsrv->hLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeReservation);
+
+ psRsrv->sBase = sAllocationDevVAddr;
+ psRsrv->uiLength = uiAllocationSize;
+ psRsrv->ppsPMR = IMG_OFFSET_ADDR(psRsrv, sizeof(*psRsrv));
+
+ eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiAllocationSize,
+ &uiAllocationSize,
+ 0, /* IMG_UINT32 uiProtFlags */
+ 0, /* alignment is n/a since we supply devvaddr */
+ &sAllocationDevVAddr,
+ psDevmemHeap->uiLog2PageSize);
+ PVR_GOTO_IF_ERROR(eError, ErrorDestroyLock);
+
+ /* since we supplied the virt addr, MMU_Alloc shouldn't have
+ chosen a new one for us */
+ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psRsrv->sBase.uiAddr);
+
+ psRsrv->psDevmemHeap = psDevmemHeap;
+ *ppsRsrv = psRsrv;
+ return PVRSRV_OK;
+
+ErrorDestroyLock:
+ OSLockDestroy(psRsrv->hLock);
+ErrorFreeReservation:
+ OSFreeMem(psRsrv);
+ErrorUnreferenceHeap:
+ DevmemIntHeapRelease(psDevmemHeap);
+ErrorReturnError:
return eError;
}
-void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_DEF_PAGE *psDefPage,
- IMG_CHAR *pcDefPageName)
+PVRSRV_ERROR
+DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv)
{
- IMG_UINT32 ui32RefCnt;
-
- ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter);
+ IMG_UINT32 i;
- /* For the cases where the dummy page allocation fails due to lack of memory
- * The refcount can still be 0 even for a sparse allocation */
- if (0 != ui32RefCnt)
- {
- OSLockAcquire(psDefPage->psPgLock);
+ MMU_Free(psRsrv->psDevmemHeap->psDevmemCtx->psMMUContext,
+ psRsrv->sBase,
+ psRsrv->uiLength,
+ psRsrv->psDevmemHeap->uiLog2PageSize);
- /* We know there will not be 4G number of sparse PMR's */
- ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter);
+ /* No need to lock the mapping here since this is a handle destruction path which can not be
+ * executed while there are outstanding handle lookups, i.e. other operations are performed
+ * on the mapping. Bridge and handle framework also make sure this path can also not be executed
+ * concurrently. */
- if (0 == ui32RefCnt)
+ for (i = 0; i < _ReservationPageCount(psRsrv); i++)
+ {
+ if (psRsrv->ppsPMR[i] != NULL)
{
- PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName);
-
- /* Free the dummy page when refcount reaches zero */
- DevPhysMemFree(psDevNode,
-#if defined(PDUMP)
- psDefPage->hPdumpPg,
-#endif
- &psDefPage->sPageHandle);
-
-#if defined(PDUMP)
- psDefPage->hPdumpPg = NULL;
-#endif
- psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+ PMRUnrefPMR2(psRsrv->ppsPMR[i]);
}
-
- OSLockRelease(psDefPage->psPgLock);
}
+ /* Don't bother with refcount on reservation, as a reservation only ever
+ * holds one mapping, so we directly decrement the refcount on the heap
+ * instead.
+ * Function will print an error if the heap could not be unreferenced. */
+ DevmemIntHeapRelease(psRsrv->psDevmemHeap);
+
+ OSLockDestroy(psRsrv->hLock);
+ OSFreeMem(psRsrv);
+
+ return PVRSRV_OK;
}
PVRSRV_ERROR
-DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
- PMR *psPMR,
- IMG_UINT32 ui32PageCount,
- IMG_UINT32 ui32PhysicalPgOffset,
- PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_DEV_VIRTADDR sDevVAddrBase)
+DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv,
+ PMR *psPMR,
+ IMG_UINT32 uiPageCount,
+ IMG_UINT32 uiPhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiVirtPageOffset)
{
PVRSRV_ERROR eError;
-
- PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
- PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PhysicalPgOffset < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PhysicalPgOffset");
-
- if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+ IMG_UINT32 uiPMRMaxChunkCount = PMRGetMaxChunkCount(psPMR);
+ DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap;
+ IMG_UINT32 uiLog2PageSize = psDevmemHeap->uiLog2PageSize;
+ IMG_UINT32 i;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(uiPageCount <= uiPMRMaxChunkCount, "uiPageCount");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(uiPhysPageOffset < uiPMRMaxChunkCount, "uiPhysPageOffset");
+ /* The range is not valid for the given virtual descriptor */
+ PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _ReservationPageCount(psRsrv),
+ "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+ PVR_LOG_RETURN_IF_FALSE((uiFlags & ~PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK) == 0,
+ "invalid flags", PVRSRV_ERROR_INVALID_FLAGS);
+
+ if (uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
- "PMR contiguity must be a multiple of the heap contiguity!",
- __func__,
- psReservation->psDevmemHeap->uiLog2PageSize,
- PMR_GetLog2Contiguity(psPMR)));
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device heap and PMR have incompatible "
+ "Log2Contiguity (%u - %u). PMR contiguity must be a multiple "
+ "of the heap contiguity!", __func__, uiLog2PageSize,
+ PMR_GetLog2Contiguity(psPMR)));
+ return PVRSRV_ERROR_INVALID_PARAMS;
}
- eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ OSLockAcquire(psRsrv->hLock);
+
+ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
uiFlags,
- sDevVAddrBase,
+ _ReservationPageAddress(psRsrv, uiVirtPageOffset),
psPMR,
- ui32PhysicalPgOffset,
- ui32PageCount,
+ uiPhysPageOffset,
+ uiPageCount,
NULL,
- psReservation->psDevmemHeap->uiLog2PageSize);
+ psDevmemHeap->uiLog2PageSize);
+ PVR_GOTO_IF_ERROR(eError, ErrUnlock);
+
+ for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++)
+ {
+ PMRRefPMR2(psPMR);
+
+ if (psRsrv->ppsPMR[i] != NULL)
+ {
+ PMRUnrefPMR2(psRsrv->ppsPMR[i]);
+ }
+
+ psRsrv->ppsPMR[i] = psPMR;
+ }
+
+ OSLockRelease(psRsrv->hLock);
+
+ return PVRSRV_OK;
+
+ErrUnlock:
+ OSLockRelease(psRsrv->hLock);
-e0:
return eError;
}
PVRSRV_ERROR
-DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
- IMG_DEV_VIRTADDR sDevVAddrBase,
- IMG_UINT32 ui32PageCount)
+DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv,
+ IMG_UINT32 uiVirtPageOffset,
+ IMG_UINT32 uiPageCount)
{
- PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
+ DEVMEMINT_HEAP *psDevmemHeap = psRsrv->psDevmemHeap;
+ IMG_UINT32 i;
+
+ PVR_LOG_RETURN_IF_FALSE((uiVirtPageOffset + uiPageCount) <= _ReservationPageCount(psRsrv),
+ "mapping offset out of range", PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+
+ OSLockAcquire(psRsrv->hLock);
/* Unmap the pages and mark them invalid in the MMU PTE */
- MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
0,
- sDevVAddrBase,
- ui32PageCount,
+ _ReservationPageAddress(psRsrv, uiVirtPageOffset),
+ uiPageCount,
NULL,
- psReservation->psDevmemHeap->uiLog2PageSize,
+ psDevmemHeap->uiLog2PageSize,
0);
+ for (i = uiVirtPageOffset; i < (uiVirtPageOffset + uiPageCount); i++)
+ {
+ if (psRsrv->ppsPMR[i] != NULL)
+ {
+ PMRUnrefPMR2(psRsrv->ppsPMR[i]);
+ psRsrv->ppsPMR[i] = NULL;
+ }
+ }
+
+ OSLockRelease(psRsrv->hLock);
+
return PVRSRV_OK;
}
/* and its length */
IMG_DEVMEM_SIZE_T uiAllocationSize;
IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
- IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
- PVRSRV_DEVICE_NODE *psDevNode;
- PMR_FLAGS_T uiPMRFlags;
- PVRSRV_DEF_PAGE *psDefPage;
- IMG_CHAR *pszPageName;
+ IMG_BOOL bIsSparse = IMG_FALSE;
if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
{
PMR_GetLog2Contiguity(psPMR) ));
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, ErrorReturnError);
}
- psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
- /* Don't bother with refcount on reservation, as a reservation
- only ever holds one mapping, so we directly increment the
- refcount on the heap instead */
- if (!DevmemIntHeapAcquire(psDevmemHeap))
+ if (!DevmemIntReservationAcquire(psReservation))
{
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError);
}
bIsSparse = PMR_IsSparse(psPMR);
if (bIsSparse)
{
- /*Get the flags*/
- uiPMRFlags = PMR_Flags(psPMR);
- bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
-
- if (bNeedBacking)
- {
- IMG_INT uiInitValue;
-
- if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
- {
- psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage;
- uiInitValue = PVR_ZERO_PAGE_INIT_VALUE;
- pszPageName = DEV_ZERO_PAGE;
- }
- else
- {
- psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage;
- uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE;
- pszPageName = DUMMY_PAGE;
- }
-
- /* Error is logged with in the function if any failures.
- * As the allocation fails we need to fail the map request and
- * return appropriate error
- *
- * Allocation of dummy/zero page is done after locking the pages for PMR physically
- * By implementing this way, the best case path of dummy/zero page being most likely to be
- * allocated after physically locking down pages, is considered.
- * If the dummy/zero page allocation fails, we do unlock the physical address and the impact
- * is a bit more in on demand mode of operation */
- eError = DevmemIntAllocDefBackingPage(psDevNode,
- psDefPage,
- uiInitValue,
- pszPageName,
- IMG_TRUE);
- PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr);
- }
-
/* N.B. We pass mapping permission flags to MMU_MapPages and let
* it reject the mapping if the permissions on the PMR are not compatible. */
eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
ui32NumDevPages,
NULL,
uiLog2HeapContiguity);
- PVR_GOTO_IF_ERROR(eError, ErrorFreeDefBackingPage);
+ PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr);
}
else
{
return PVRSRV_OK;
-ErrorFreeDefBackingPage:
- if (bNeedBacking)
- {
- /*if the mapping failed, the allocated dummy ref count need
- * to be handled accordingly */
- DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
- psDefPage,
- pszPageName);
- }
-
ErrorUnlockPhysAddr:
{
PVRSRV_ERROR eError1 = PVRSRV_OK;
ErrorUnreference:
/* if fails there's not much to do (the function will print an error) */
- DevmemIntHeapRelease(psDevmemHeap);
+ DevmemIntReservationRelease(psReservation);
ErrorReturnError:
PVR_ASSERT (eError != PVRSRV_OK);
IMG_DEV_VIRTADDR sAllocationDevVAddr;
/* number of pages (device pages) that allocation spans */
IMG_UINT32 ui32NumDevPages;
- IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
- PMR_FLAGS_T uiPMRFlags;
+ IMG_BOOL bIsSparse = IMG_FALSE;
ui32NumDevPages = psMapping->uiNumPages;
sAllocationDevVAddr = psMapping->psReservation->sBase;
if (bIsSparse)
{
- /*Get the flags*/
- uiPMRFlags = PMR_Flags(psMapping->psPMR);
- bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
-
- if (bNeedBacking)
- {
- if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
- {
- DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
- &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage,
- DEV_ZERO_PAGE);
- }
- else
- {
- DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode,
- &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage,
- DUMMY_PAGE);
- }
- }
-
- MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
0,
sAllocationDevVAddr,
ui32NumDevPages,
NULL,
- psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+ psDevmemHeap->uiLog2PageSize,
0);
}
else
MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
sAllocationDevVAddr,
ui32NumDevPages,
- psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+ psDevmemHeap->uiLog2PageSize);
}
eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
PVR_ASSERT(eError == PVRSRV_OK);
- /* Don't bother with refcount on reservation, as a reservation only ever
- * holds one mapping, so we directly decrement the refcount on the heap
- * instead.
- * Function will print an error if the heap could not be unreferenced. */
- DevmemIntHeapRelease(psDevmemHeap);
+ DevmemIntReservationRelease(psMapping->psReservation);
OSFreeMem(psMapping);
psReservation = OSAllocMem(sizeof(*psReservation));
PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, ErrorUnreference);
+ /* Create lock */
+ eError = OSLockCreate(&psReservation->hLock);
+
+ /* Initialise refcount */
+ psReservation->i32RefCount = 1;
+
psReservation->sBase = sAllocationDevVAddr;
psReservation->uiLength = uiAllocationSize;
{
IMG_DEV_VIRTADDR sBase = psReservation->sBase;
IMG_UINT32 uiLength = psReservation->uiLength;
- IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+ DEVMEMINT_HEAP *psDevmemHeap = psReservation->psDevmemHeap;
+ IMG_UINT32 uiLog2DataPageSize = psDevmemHeap->uiLog2PageSize;
- MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ MMU_Free(psDevmemHeap->psDevmemCtx->psMMUContext,
sBase,
uiLength,
uiLog2DataPageSize);
- /* Don't bother with refcount on reservation, as a reservation only ever
- * holds one mapping, so we directly decrement the refcount on the heap
- * instead.
- * Function will print an error if the heap could not be unreferenced. */
- DevmemIntHeapRelease(psReservation->psDevmemHeap);
-
- OSFreeMem(psReservation);
+ DevmemIntReservationRelease(psReservation);
+ DevmemIntHeapRelease(psDevmemHeap);
return PVRSRV_OK;
}
PVRSRV_ERROR
DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap)
{
+ if (psDevmemHeap->pfnDeInit != NULL)
+ {
+ psDevmemHeap->pfnDeInit(psDevmemHeap->hPrivData);
+ psDevmemHeap->pfnDeInit = NULL;
+ }
+
if (OSAtomicRead(&psDevmemHeap->uiRefCount) != DEVMEMHEAP_REFCOUNT_MIN)
{
PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) "
return PVRSRV_OK;
}
+IMG_DEV_VIRTADDR
+DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ PVR_ASSERT(psDevmemHeap != NULL);
+
+ return psDevmemHeap->sBaseAddr;
+}
+
PVRSRV_ERROR
DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
PMR *psPMR,
return PVRSRV_OK;
}
-PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- DEVMEMINT_CTX *psDevMemContext,
- IMG_DEV_VIRTADDR sDevAddr)
+/*************************************************************************/ /*!
+@Function DevmemIntGetVDevAddrPageSize
+@Description Get the page size for a virtual address.
+@Input psConnection
+@Input psDevNode
+@Input psDevmemCtx Device Memory context
+@Input sDevAddr Get the page size for this virtual address.
+@Output puiLog2HeapPageSize On success returns log2 of the page size.
+@Return Failure code if the virtual address is outside any heap.
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR DevmemIntGetVDevAddrPageSize(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr,
+ IMG_PUINT32 puiLog2HeapPageSize)
{
IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
return PVRSRV_ERROR_INVALID_GPU_ADDR;
}
- return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
- uiLog2HeapPageSize,
- sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+ *puiLog2HeapPageSize = uiLog2HeapPageSize;
+ return PVRSRV_OK;
}
-PVRSRV_ERROR
-DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate)
+/*************************************************************************/ /*!
+@Function DevmemIntIsVDevAddrValid
+@Description Checks if a virtual address is valid for access.
+@Input psConnection
+@Input psDevNode
+@Input psDevmemCtx Device Memory context
+@Input sDevAddr Virtual address to check.
+@Return Failure code if the virtual address is invalid.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr)
{
- PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode;
- MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext;
-
- if (psDevNode->pfnDevSLCFlushRange)
+ IMG_UINT32 uiLog2HeapPageSize = 0;
+ PVRSRV_ERROR eError;
+ eError = DevmemIntGetVDevAddrPageSize(psConnection,
+ psDevNode,
+ psDevMemContext,
+ sDevAddr,
+ &uiLog2HeapPageSize);
+ if (eError != PVRSRV_OK)
{
- return psDevNode->pfnDevSLCFlushRange(psDevNode,
- psMMUContext,
- sDevVAddr,
- uiSize,
- bInvalidate);
+ return eError;
}
- return PVRSRV_ERROR_NOT_SUPPORTED;
+ return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+ uiLog2HeapPageSize,
+ sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
}
PVRSRV_ERROR
@Description Registers a PID to be notified when a page fault occurs on a
specific device memory context.
@Input psDevmemCtx The context to be notified about.
-@Input ui32PID The PID of the process that would like to be
- notified.
@Input bRegister If true, register. If false, de-register.
@Return PVRSRV_ERROR.
*/ /**************************************************************************/
PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
- IMG_INT32 ui32PID,
IMG_BOOL bRegister)
{
PVRSRV_DEVICE_NODE *psDevNode;
DEVMEMINT_PF_NOTIFY *psNotifyNode;
IMG_BOOL bPresent = IMG_FALSE;
PVRSRV_ERROR eError;
+ IMG_PID ui32PID;
PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx");
}
}
+ /* Obtain current client PID */
+ ui32PID = OSGetCurrentClientProcessIDKM();
+
/* Loop through the registered PIDs and check whether this one is
* present */
dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
}
PVRSRV_ERROR
-DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevmemCtx,
IMG_DEV_VIRTADDR sDevAddrStart,
IMG_DEVMEM_SIZE_T uiSize,
IMG_UINT32 ui32ArraySize,
{
PVRSRV_ERROR eError;
IMG_UINT32 uiPDumpMMUCtx;
+ IMG_UINT32 uiLog2HeapPageSize;
+ IMG_UINT32 uiHeapPageSize;
+ IMG_DEV_VIRTADDR sValidStart, sValidEnd, sCurrent, sEnd, sStartPage;
+ IMG_UINT64 ui64PageMask;
PVR_UNREFERENCED_PARAMETER(ui32ArraySize);
PVR_ASSERT(eError == PVRSRV_OK);
+ /*
+ Get the page size for heap containing the start virtual address.
+ */
+ eError = DevmemIntGetVDevAddrPageSize(psConnection,
+ psDeviceNode,
+ psDevmemCtx,
+ sDevAddrStart,
+ &uiLog2HeapPageSize);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ uiHeapPageSize = 1 << uiLog2HeapPageSize;
+ ui64PageMask = uiHeapPageSize - 1;
+
/*
The following SYSMEM refers to the 'MMU Context', hence it
should be the MMU context, not the PMR, that says what the PDump
From a PDump P.O.V. it doesn't matter which name space we use as long
as that MemSpace is used on the 'MMU Context' we're dumping from
*/
- eError = PDumpMMUSAB(psDevmemCtx->psDevNode,
- psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
- uiPDumpMMUCtx,
- sDevAddrStart,
- uiSize,
- pszFilename,
- ui32FileOffset,
- ui32PDumpFlags);
- PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+ Iterate every page in the region to dump...
+ */
+ sValidStart.uiAddr = sValidEnd.uiAddr = 0; /* Start/end of the current region which is valid to read. */
+ sStartPage.uiAddr = sDevAddrStart.uiAddr & ~ui64PageMask; /* Page aligned start of the region to dump. */
+ sCurrent = sStartPage;
+ sEnd.uiAddr = sDevAddrStart.uiAddr + uiSize; /* End of the region to dump. */
+ for (;;)
+ {
+ IMG_BOOL bAtEnd = sCurrent.uiAddr >= sEnd.uiAddr;
+ IMG_BOOL bValid = IMG_FALSE;
+
+ if (!bAtEnd)
+ {
+ /* Check if the page starting at the current address is valid for reading. */
+ eError = DevmemIntIsVDevAddrValid(psConnection,
+ psDeviceNode,
+ psDevmemCtx,
+ sCurrent);
+ if (eError == PVRSRV_OK)
+ {
+ /* If the current valid region is empty then set the start to the current page. */
+ if (sValidStart.uiAddr == 0)
+ {
+ if (sCurrent.uiAddr == sStartPage.uiAddr)
+ {
+ /* Use the start of the region to dump if it doesn't start page aligned. */
+ sValidStart = sDevAddrStart;
+ }
+ else
+ {
+ sValidStart = sCurrent;
+ }
+ }
+ /* Set the end of the valid region. */
+ sValidEnd.uiAddr = sCurrent.uiAddr + uiHeapPageSize;
+ /* Restrict to the region to dump. */
+ if (sValidEnd.uiAddr > sEnd.uiAddr)
+ {
+ sValidEnd = sEnd;
+ }
+ bValid = IMG_TRUE;
+ }
+ /* Move to the next page. */
+ sCurrent.uiAddr += uiHeapPageSize;
+ }
+ /*
+		   If the current page is invalid, or we've reached the end of the region to dump, then PDump the current valid region.
+ */
+ if (!bValid && sValidEnd.uiAddr > sValidStart.uiAddr)
+ {
+ IMG_DEVMEM_SIZE_T uiValidSize = sValidEnd.uiAddr - sValidStart.uiAddr;
+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode,
+ psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+ uiPDumpMMUCtx,
+ sValidStart,
+ uiValidSize,
+ pszFilename,
+ ui32FileOffset,
+ ui32PDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ ui32FileOffset += uiValidSize;
+
+ sValidStart.uiAddr = sValidEnd.uiAddr = 0;
+ }
+
+ if (bAtEnd)
+ {
+ break;
+ }
+ }
MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags);
return PVRSRV_OK;
{
PVRSRV_ERROR eError;
+#if defined(__linux__) && defined(__KERNEL__)
+ eError = OSLockCreateNoStats(&_g_hLock);
+#else
eError = OSLockCreate(&_g_hLock);
+#endif
PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", return_);
- _g_psRootGroup = OSAllocMem(sizeof(*_g_psRootGroup));
+ _g_psRootGroup = OSAllocMemNoStats(sizeof(*_g_psRootGroup));
PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup, eError, destroy_lock_);
- _g_psRootGroup->pszName = OSAllocMem(sizeof(ROOT_GROUP_NAME));
+ _g_psRootGroup->pszName = OSAllocMemNoStats(sizeof(ROOT_GROUP_NAME));
PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup->pszName, eError, cleanup_name_);
OSStringLCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME,
sizeof(ROOT_GROUP_NAME));
return PVRSRV_OK;
cleanup_name_:
- OSFreeMem(_g_psRootGroup);
+ OSFreeMemNoStats(_g_psRootGroup);
destroy_lock_:
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(_g_hLock);
+#else
OSLockDestroy(_g_hLock);
+#endif
return_:
return eError;
}
/* all resources freed so free the lock itself too */
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(_g_hLock);
+#else
OSLockDestroy(_g_hLock);
+#endif
}
static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb,
dllist_remove_node(&psGroup->sListNode);
- OSFreeMem(psGroup->pszName);
- OSFreeMem(psGroup);
+ if (psGroup == _g_psRootGroup)
+ {
+ OSFreeMemNoStats(psGroup->pszName);
+ OSFreeMemNoStats(psGroup);
+ }
+ else
+ {
+ OSFreeMem(psGroup->pszName);
+ OSFreeMem(psGroup);
+ }
}
void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry)
return --psHandleData->iLookupCount;
}
-static inline IMG_BOOL IsRetryError(PVRSRV_ERROR eError)
-{
- return eError == PVRSRV_ERROR_RETRY || eError == PVRSRV_ERROR_KERNEL_CCB_FULL;
-}
-
#if defined(PVRSRV_NEED_PVR_DPF)
static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType)
{
eError = psHandleData->pfnReleaseData(psHandleData->pvData);
if (eError != PVRSRV_OK)
{
- if (IsRetryError(eError))
+ if (PVRSRVIsRetryError(eError))
{
PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release "
"data callback for handle %p of type = %s", __func__,
/* If the data could not be freed due to a temporary condition the
* handle must be kept alive so that the next destroy call can try again */
- if (IsRetryError(eError))
+ if (PVRSRVIsRetryError(eError))
{
psHandleData->bCanLookup = IMG_TRUE;
}
PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_TYPE_DEVMEMXINT_RESERVATION,
PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
*/ /**************************************************************************/
#include "rgxdevice.h"
#include "htbserver.h"
-#include "htbuffer.h"
#include "htbuffer_types.h"
#include "tlstream.h"
#include "tlclient.h"
"%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
__func__, ui32Data, HTB_SF_GID(ui32Data),
HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
- bUnrecognizedErrorPrinted = IMG_FALSE;
+ bUnrecognizedErrorPrinted = IMG_TRUE;
}
} while (HTB_SF_LAST == ui32LogIdx);
*/ /**************************************************************************/
#include "htbserver.h"
-#include "htbuffer.h"
#include "htbuffer_types.h"
#include "tlstream.h"
#include "pvrsrv_tlcommon.h"
/* number of times to try rewriting a log entry */
#define HTB_LOG_RETRY_COUNT 5
+#if defined(__linux__)
+ #include <linux/version.h>
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
+
/*************************************************************************/ /*!
Host Trace Buffer control information structure
*/ /**************************************************************************/
static IMG_HANDLE hHtbDbgReqNotify;
-
/************************************************************************/ /*!
@Function _LookupFlags
@Description Convert HTBuffer Operation mode to TLStream flags
return PVRSRV_OK;
}
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+ {
+ if ( g_sCtrl.aui32EnablePID[i] == PID )
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+ @Function HTBLogKM
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+                         another, can be logged correctly.
+
+ @Input ui64TimeStamp The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+HTBLogKM(IMG_UINT32 PID,
+ IMG_UINT32 TID,
+ IMG_UINT64 ui64TimeStamp,
+ HTB_LOG_SFids SF,
+ va_list args
+)
+{
+ OS_SPINLOCK_FLAGS uiSpinLockFlags;
+ IMG_UINT32 ui32ReturnFlags = 0;
+ IMG_UINT32 i = 0;
+
+ /* Local snapshot variables of global counters */
+ IMG_UINT64 ui64OSTSSnap;
+ IMG_UINT64 ui64CRTSSnap;
+ IMG_UINT32 ui32ClkSpeedSnap;
+
+ /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*
+ * Buffer is on the stack so we don't need a semaphore to guard it
+ */
+ IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1] = {0};
+
+ /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/
+ * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes,
+ * hence with these constraints this design is unlikely to get
+ * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error
+ */
+ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+ IMG_UINT32 * pui32Message = aui32MessageBuffer;
+ IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+
+ IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+ PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+ ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ?
+ HTB_LOG_MAX_PARAMS : ui32NumArgs;
+
+ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError);
+ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError);
+
+ /* Needs to be set up here because it's accessed from both `if` blocks below
+ * and it needs to be pre-populated for both of them (pui32Message case and
+ * HTB_SF_CTRL_FWSYNC_MARK_SCALE case). */
+ for (i = 0; i < ui32NumArgs; i++)
+ {
+ aui32Args[i] = va_arg(args, IMG_UINT32);
+ }
+
+ if ( g_hTLStream
+ && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+ )
+ {
+ *pui32Message++ = SF;
+ *pui32Message++ = PID;
+ *pui32Message++ = TID;
+ *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff));
+ *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff));
+ for (i = 0; i < ui32NumArgs; i++)
+ {
+ pui32Message[i] = aui32Args[i];
+ }
+
+ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+ {
+ OSReleaseThreadQuanta();
+ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
+ }
+
+ if ( PVRSRV_OK == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_FALSE;
+ }
+ else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ if ( PVRSRV_ERROR_STREAM_FULL == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_TRUE;
+ }
+
+ }
+
+ if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE)
+ {
+ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+
+ /* If a marker is being placed reset byte count from last marker */
+ g_sCtrl.ui32ByteCount = 0;
+ g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2];
+ g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2];
+ g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD];
+
+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+ }
+ else
+ {
+ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+ /* Increase global count */
+ g_sCtrl.ui32ByteCount += ui32MessageSize;
+
+ /* Check if packet has overwritten last marker/rpt &&
+ If the packet count is over half the size of the buffer */
+ if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED &&
+ g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize))
+ {
+ /* Take snapshot of global variables */
+ ui64OSTSSnap = g_sCtrl.ui64OSTS;
+ ui64CRTSSnap = g_sCtrl.ui64CRTS;
+ ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed;
+ /* Reset global variable counter */
+ g_sCtrl.ui32ByteCount = 0;
+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+
+ /* Produce a repeat marker */
+ HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap);
+ }
+ else
+ {
+ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
+ }
+ }
+
+ReturnError:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+ @Function HTBLog
+ @Description Record a Host Trace Buffer log event
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events
+ associated with a particular process, but
+ performed by another can be logged correctly.
+ @Input ui64TimeStamp The timestamp to be associated with this
+ log event
+ @Input SF The log event ID
+ @Input ... Log parameters
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+HTBLog(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp,
+ IMG_UINT32 SF, ...)
+{
+ PVRSRV_ERROR eError;
+
+ va_list args;
+ va_start(args, SF);
+ eError = HTBLogKM(PID, TID, ui64TimeStamp, SF, args);
+ va_end(args);
+ return eError;
+}
static void
_OnTLReaderOpenCallback( void *pvArg )
{
IMG_UINT64 ui64Time;
OSClockMonotonicns64(&ui64Time);
- (void) HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+ (void) HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
g_sCtrl.ui32SyncMarker,
((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
}
/* Dump the current configuration state */
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
PVR_LOG_IF_ERROR(eError, "HTBLog");
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
PVR_LOG_IF_ERROR(eError, "HTBLog");
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
PVR_LOG_IF_ERROR(eError, "HTBLog");
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
PVR_LOG_IF_ERROR(eError, "HTBLog");
for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
{
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
PVR_LOG_IF_ERROR(eError, "HTBLog");
}
/* Else should never be hit as we set the spd when the power state is updated */
if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
{
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
g_sCtrl.ui32SyncMarker,
((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
return eError;
}
-/*************************************************************************/ /*!
-*/ /**************************************************************************/
-static IMG_BOOL
-_ValidPID( IMG_UINT32 PID )
-{
- IMG_UINT32 i;
-
- for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
- {
- if ( g_sCtrl.aui32EnablePID[i] == PID )
- {
- return IMG_TRUE;
- }
- }
- return IMG_FALSE;
-}
-
/*************************************************************************/ /*!
@Function HTBSyncPartitionMarker
/* Else should never be hit as we set the spd when the power state is updated */
if (0 != g_sCtrl.ui32SyncCalcClkSpd)
{
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
ui32Marker,
((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
/* Else should never be hit as we set the spd when the power state is updated */
if (0 != ui32ClkSpeed)
{
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
ui32Marker,
((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)),
((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)),
PVRSRV_ERROR eError;
IMG_UINT64 ui64Time;
OSClockMonotonicns64(&ui64Time);
- eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
+ eError = HTBLog(0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
g_sCtrl.ui32SyncMarker,
((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
}
}
-
/*************************************************************************/ /*!
- @Function HTBLogKM
- @Description Record a Host Trace Buffer log event
-
- @Input PID The PID of the process the event is associated
- with. This is provided as an argument rather
- than querying internally so that events associated
- with a particular process, but performed by
- another can be logged correctly.
-
- @Input ui64TimeStamp The timestamp to be associated with this log event
-
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and
+ Timestamp
@Input SF The log event ID
-
@Input ... Log parameters
-
@Return PVRSRV_OK Success.
-
*/ /**************************************************************************/
-PVRSRV_ERROR
-HTBLogKM(
- IMG_UINT32 PID,
- IMG_UINT32 TID,
- IMG_UINT64 ui64TimeStamp,
- HTB_LOG_SFids SF,
- IMG_UINT32 ui32NumArgs,
- IMG_UINT32 * aui32Args
-)
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_UINT32 SF, ...)
{
- OS_SPINLOCK_FLAGS uiSpinLockFlags;
- IMG_UINT32 ui32ReturnFlags = 0;
-
- /* Local snapshot variables of global counters */
- IMG_UINT64 ui64OSTSSnap;
- IMG_UINT64 ui64CRTSSnap;
- IMG_UINT32 ui32ClkSpeedSnap;
-
- /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*
- * Buffer is on the stack so we don't need a semaphore to guard it
- */
- IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
-
- /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/
- * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes,
- * hence with these constraints this design is unlikely to get
- * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error
- */
- PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
- IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
- IMG_UINT32 * pui32Message = aui32MessageBuffer;
- IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
-
- PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError);
- PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError);
- PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError);
-
- if ( g_hTLStream
- && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
-/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
-/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
- )
- {
- *pui32Message++ = SF;
- *pui32Message++ = PID;
- *pui32Message++ = TID;
- *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff));
- *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff));
- while ( ui32NumArgs )
- {
- ui32NumArgs--;
- pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
- }
-
- eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
- while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
- {
- OSReleaseThreadQuanta();
- eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags );
- }
-
- if ( PVRSRV_OK == eError )
- {
- g_sCtrl.bLogDropSignalled = IMG_FALSE;
- }
- else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
- {
- PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
- }
- if ( PVRSRV_ERROR_STREAM_FULL == eError )
- {
- g_sCtrl.bLogDropSignalled = IMG_TRUE;
- }
-
- }
-
- if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE)
- {
- OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
-
- /* If a marker is being placed reset byte count from last marker */
- g_sCtrl.ui32ByteCount = 0;
- g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2];
- g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2];
- g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD];
-
- OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
- }
- else
- {
- OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
- /* Increase global count */
- g_sCtrl.ui32ByteCount += ui32MessageSize;
-
- /* Check if packet has overwritten last marker/rpt &&
- If the packet count is over half the size of the buffer */
- if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED &&
- g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize))
- {
- /* Take snapshot of global variables */
- ui64OSTSSnap = g_sCtrl.ui64OSTS;
- ui64CRTSSnap = g_sCtrl.ui64CRTS;
- ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed;
- /* Reset global variable counter */
- g_sCtrl.ui32ByteCount = 0;
- OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
-
- /* Produce a repeat marker */
- HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap);
- }
- else
- {
- OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags);
- }
- }
-
-ReturnError:
+ PVRSRV_ERROR eError;
+ IMG_UINT64 ui64TimeStamp;
+ va_list args;
+ va_start(args, SF);
+ OSClockMonotonicns64(&ui64TimeStamp);
+ eError = HTBLogKM(OSGetCurrentProcessID(), OSGetCurrentThreadID(), ui64TimeStamp,
+ SF, args);
+ va_end(args);
return eError;
}
PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
{
const PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
- PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
- PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
PVRSRV_MEMALLOCFLAG_CPU_UNCACHED |
PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL);
eError = OSLockCreate(&psData->hInfoPageLock);
PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
+	/* Because the memory is allocated read-only, we need to explicitly set it
+	 * to 0. The reason for this is that if the memory is allocated with
+	 * ZERO_ON_ALLOC, a WRITEABLE attribute is implicitly added to the flags
+	 * (see DevmemValidateParams()). */
+ OSCachedMemSet(psData->pui32InfoPage, 0, OSGetPageSize());
+
return PVRSRV_OK;
e0:
#include "pvr_notifier.h"
#include "pvrsrv.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "pvr_ricommon.h"
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#include "pdump_physmem.h"
#endif
+#include "physmem.h"
#if defined(SUPPORT_GPUVIRT_VALIDATION)
#include "physmem_lma.h"
+#if defined(PVRSRV_TEST_FW_PREMAP_MMU)
+#include "rgxdevice.h"
+#include "xt_mmu_fw_premap.h"
+#endif
#endif
/*
#define MMU_OBJ_DBG(x)
#endif
+#define DUMMY_PAGE "DUMMY_PAGE"
+#define DEV_ZERO_PAGE "DEV_ZERO_PAGE"
+#define PVR_DUMMY_PAGE_INIT_VALUE 0
+#define PVR_ZERO_PAGE_INIT_VALUE 0
+
/*!
* Refcounted structure that is shared between the context and
* the cleanup thread items.
DLLIST_NODE sMMUCtxCleanupItemsHead;
/*! Was the MMU context destroyed and should not be accessed any more? */
IMG_BOOL bMMUContextExists;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/*! Associated OSid for this context */
IMG_UINT32 ui32OSid;
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */
} MMU_CTX_CLEANUP_DATA;
/*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
DLLIST_NODE sTmpMMUMappingHead;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
IMG_UINT32 ui32OSid;
IMG_UINT32 ui32OSidReg;
IMG_BOOL bOSidAxiProt;
IMG_UINT32 ui32PDumpContextIDRefCount;
#endif
- /*! MMU cache invalidation flags (only used on Volcanic driver) */
+ /*! MMU cache invalidation flags. For Rogue, only one set of flags are
+ * maintained (in the KernelMMUCtx) and used for all context page table
+ * updates, while in Volcanic each context maintains its own flags
+ * for the PTs it contains. This is due to the different memory hierarchy
+ * designs in use between the architectures, See SLC_VIVT feature. */
ATOMIC_T sCacheFlags;
/*! Lock to ensure exclusive access when manipulating the MMU context or
return PVRSRV_ERROR_RETRY;
}
psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+ /* If we have no sync reference we treat this as a simple FREE.
+ * We cannot retry as there will never be a sync-prim to kick / invalidate.
+ */
+ if (psCleanup->psSync == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %u has no MMUCacheSyncPrim",
+ __func__, psDevNode->sDevId.ui32InternalID));
+ bFreeNow = IMG_TRUE;
+ goto freeNow;
+ }
}
uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr);
}
}
+freeNow:
/* Free if the invalidate operation completed or the operation itself timed out */
if (bFreeNow)
{
@Input uiFlags Flags that where passed in the allocation.
+@Input uBaseAlignment Alignment for the base returned, not used
+ in this context.
+
@Output puiBase The address of where to insert this import
@Output puiActualSize The actual size of the import
static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
RA_LENGTH_T uiSize,
RA_FLAGS_T uiFlags,
+ RA_LENGTH_T uBaseAlignment,
const IMG_CHAR *pszAnnotation,
RA_BASE_T *puiBase,
RA_LENGTH_T *puiActualSize,
PVR_UNREFERENCED_PARAMETER(pszAnnotation);
PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment);
PVR_ASSERT(psDevNode != NULL);
PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0);
PVR_GOTO_IF_NOMEM(psMapping, eError, e0);
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
+ uiPid = psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ?
PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
#endif
if (eError != PVRSRV_OK)
{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+ PVRSRVStatsUpdateOOMStat(NULL, psDevNode, PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT,
OSGetCurrentClientProcessIDKM());
#endif
goto e1;
case 8:
{
IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+ IMG_UINT64 ui64PxE64;
- pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */
+ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */
>> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
<< psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
& psConfig->uiAddrMask; /* Delete unused higher bits */
- pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+ ui64PxE64 |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+ pui64Px[uiIndex] = ui64PxE64;
HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
uiIndex, eMMULevel,
- HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
(uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
break;
}
}
}
+/* dummy pages */
+
+/* Lazily allocate a device-wide backing page (dummy or zero page) used to
+ * back sparse allocations. Idempotent: if psDefPage already holds a valid
+ * physical address (!= MMU_BAD_PHYS_ADDR) the existing page is reused.
+ * Serialised by psDefPage->psPgLock so concurrent mappers race safely.
+ * Returns PVRSRV_OK, or the DevPhysMemAlloc error on failure. */
+static PVRSRV_ERROR _MMU_GetBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_DEF_PAGE *psDefPage,
+ IMG_INT uiInitValue,
+ IMG_CHAR *pcDefPageName,
+ IMG_BOOL bInitPage)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEV_PHYADDR sDevPAddr = {0};
+
+ OSLockAcquire(psDefPage->psPgLock);
+
+ /* Page already allocated by an earlier caller - nothing to do. */
+ if (psDefPage->ui64PgPhysAddr != MMU_BAD_PHYS_ADDR)
+ {
+ goto UnlockAndReturn;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName);
+#endif
+
+ /* Allocate the dummy page required for sparse backing */
+ eError = DevPhysMemAlloc(psDevNode,
+ (1 << psDefPage->ui32Log2PgSize),
+ 0,
+ uiInitValue,
+ bInitPage,
+#if defined(PDUMP)
+ psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ pcDefPageName,
+ &psDefPage->hPdumpPg,
+#endif
+ PVR_SYS_ALLOC_PID,
+ &psDefPage->sPageHandle,
+ &sDevPAddr);
+ PVR_GOTO_IF_ERROR(eError, UnlockAndReturn);
+
+ /* Publish the physical address only after a successful allocation. */
+ psDefPage->ui64PgPhysAddr = sDevPAddr.uiAddr;
+
+UnlockAndReturn:
+ OSLockRelease(psDefPage->psPgLock);
+
+ return eError;
+}
+
+/* Release a device-wide backing page previously created by
+ * _MMU_GetBackingPage. Idempotent: a page that was never allocated
+ * (phys addr == MMU_BAD_PHYS_ADDR) is silently skipped. Resets the
+ * physical address afterwards so the page can be lazily re-created.
+ * Serialised by psDefPage->psPgLock. */
+static void _MMU_FreeBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_DEF_PAGE *psDefPage,
+ IMG_CHAR *pcDefPageName)
+{
+ OSLockAcquire(psDefPage->psPgLock);
+
+ /* Nothing to free if the page was never allocated. */
+ if (psDefPage->ui64PgPhysAddr == MMU_BAD_PHYS_ADDR)
+ {
+ goto UnlockAndReturn;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName);
+#endif
+
+ DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+ psDefPage->hPdumpPg,
+#endif
+ &psDefPage->sPageHandle);
+
+#if defined(PDUMP)
+ psDefPage->hPdumpPg = NULL;
+#endif
+ /* Mark as unallocated so a later _MMU_GetBackingPage re-allocates. */
+ psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+UnlockAndReturn:
+ OSLockRelease(psDefPage->psPgLock);
+}
+
/*****************************************************************************
* Public interface functions *
*****************************************************************************/
+/*
+ MMU_InitDevice
+*/
+/* Per-device MMU setup: initialise the dummy and zero backing-page
+ * descriptors and create their locks. With PDUMP enabled the pages are
+ * also allocated up-front so they appear in the capture; otherwise page
+ * allocation is deferred to first use in _MMU_GetBackingPage.
+ * Returns PVRSRV_OK on success, or the first failing sub-call's error
+ * after unwinding any partially-created state. */
+PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ /* Set the order to 0 */
+ psDevNode->sDummyPage.sPageHandle.uiOrder = 0;
+ psDevNode->sDevZeroPage.sPageHandle.uiOrder = 0;
+
+ /* Set the size of the Dummy and Zero pages to largest page size */
+ if (psDevNode->ui32RGXLog2Non4KPgSize != 0)
+ {
+ psDevNode->sDummyPage.ui32Log2PgSize = psDevNode->ui32RGXLog2Non4KPgSize;
+ psDevNode->sDevZeroPage.ui32Log2PgSize = psDevNode->ui32RGXLog2Non4KPgSize;
+ }
+ else
+ {
+ /* ui32Log2PgSize holds a log2 value and is later used as
+ * (1 << ui32Log2PgSize) in _MMU_GetBackingPage. Storing the byte
+ * size (OSGetPageSize(), e.g. 4096) here would make that shift
+ * nonsensical; store the page shift (e.g. 12) instead. */
+ psDevNode->sDummyPage.ui32Log2PgSize = OSGetPageShift();
+ psDevNode->sDevZeroPage.ui32Log2PgSize = OSGetPageShift();
+ }
+
+ /* Set the Dummy page phys addr */
+ psDevNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+ /* Set the Zero page phys addr */
+ psDevNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+ /* The lock can be acquired from MISR (Z-buffer) path */
+ eError = OSLockCreate(&psDevNode->sDummyPage.psPgLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Dummy", ErrReturnError);
+
+ /* Create the lock for zero page */
+ eError = OSLockCreate(&psDevNode->sDevZeroPage.psPgLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate.Zero", ErrFreeDummyPageLock);
+
+#ifdef PDUMP
+ psDevNode->sDummyPage.hPdumpPg = NULL;
+ psDevNode->sDevZeroPage.hPdumpPg = NULL;
+
+ /* Eagerly allocate both pages so PDump captures their creation. */
+ eError = _MMU_GetBackingPage(psDevNode,
+ &psDevNode->sDummyPage,
+ PVR_DUMMY_PAGE_INIT_VALUE,
+ DUMMY_PAGE,
+ IMG_TRUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage.Dummy", ErrFreeZeroPageLock);
+
+ eError = _MMU_GetBackingPage(psDevNode,
+ &psDevNode->sDevZeroPage,
+ PVR_ZERO_PAGE_INIT_VALUE,
+ DEV_ZERO_PAGE,
+ IMG_TRUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage.Zero", ErrFreeDummyPage);
+#endif /* PDUMP */
+
+ return PVRSRV_OK;
+
+#ifdef PDUMP
+ErrFreeDummyPage:
+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sDummyPage, DUMMY_PAGE);
+ErrFreeZeroPageLock:
+ OSLockDestroy(psDevNode->sDevZeroPage.psPgLock);
+ psDevNode->sDevZeroPage.psPgLock = NULL;
+#endif /* PDUMP */
+ErrFreeDummyPageLock:
+ OSLockDestroy(psDevNode->sDummyPage.psPgLock);
+ psDevNode->sDummyPage.psPgLock = NULL;
+ErrReturnError:
+ return eError;
+}
+
+/*
+ MMU_DeInitDevice
+*/
+/* Per-device MMU teardown: free the dummy and zero backing pages and
+ * destroy their locks. A NULL lock means MMU_InitDevice never completed
+ * initialisation for that page, so it is skipped. */
+void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode)
+{
+ if (psDevNode->sDummyPage.psPgLock != NULL)
+ {
+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sDummyPage, DUMMY_PAGE);
+ OSLockDestroy(psDevNode->sDummyPage.psPgLock);
+ psDevNode->sDummyPage.psPgLock = NULL;
+ }
+
+ if (psDevNode->sDevZeroPage.psPgLock != NULL)
+ {
+ _MMU_FreeBackingPage(psDevNode, &psDevNode->sDevZeroPage, DEV_ZERO_PAGE);
+ OSLockDestroy(psDevNode->sDevZeroPage.psPgLock);
+ psDevNode->sDevZeroPage.psPgLock = NULL;
+ }
+}
+
/*
MMU_ContextCreate
*/
psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */
psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/* Save the app-specific values for external reference via MMU_GetOSids. */
if (psConnection != NULL)
{
psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData)));
PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4);
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/* Record the originating OSid for all allocation / free for this context */
psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid;
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */
OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock);
psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead);
sDevVAddrEnd.uiAddr += uSize;
OSLockAcquire(psMMUContext->hLock);
- eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+#if defined(PVRSRV_TEST_FW_PREMAP_MMU) && defined(SUPPORT_GPUVIRT_VALIDATION)
+ /*
+ * This is to divert generation of firmware pre-mapped page tables to a standalone mmu driver.
+ * Only the kernel created (direct bridge) firmware memory context will have a NULL connection
+ * and all other application memory context get a valid connection object.
+ */
+ if (psMMUContext->psConnection == NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Standalone MMU driver Log2 Page size set to %d",
+ __func__, PVRSRVMMU4GetPageSizeFromVirtAddr(*psDevVAddr,
+ &psDevInfo->aui64MMUPageSizeRangeValue[0],
+ ARRAY_SIZE(psDevInfo->aui64MMUPageSizeRangeValue))));
+ eError = PVRSRVConfigureMMU((MMUx_CONTEXT *)psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+ }
+ else
+#endif
+ {
+ eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+ }
OSLockRelease(psMMUContext->hLock);
if (eError != PVRSRV_OK)
IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0;
- IMG_UINT64 uiDummyProtFlags = 0;
MMU_PROTFLAGS_T uiMMUProtFlags = 0;
const MMU_PxE_CONFIG *psConfig;
#endif /*PDUMP*/
/* Validate the most essential parameters */
- PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0);
- PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0);
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext != NULL, "psMMUContext");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psPMR != NULL, "psPMR");
psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
{
psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
- PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0);
+ PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, ErrReturnError);
pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
- if (pbValid == NULL)
- {
- /* Should allocation fail, clean-up here before exit */
- OSFreeMem(psDevPAddr);
- PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0);
- }
+ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrFreePAddrMappingArray);
}
else
{
((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize),
psDevPAddr,
pbValid);
- PVR_GOTO_IF_ERROR(eError, e1);
+ PVR_GOTO_IF_ERROR(eError, ErrFreeValidArray);
}
/*Get the Page table level configuration */
uiMappingFlags,
&uiMMUProtFlags,
psMMUContext);
- PVR_GOTO_IF_ERROR(eError, e2);
+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig);
/* Callback to get device specific protection flags */
if (psConfig->uiBytesPerEntry == 8)
}
else
{
- PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2);
+ PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, ErrPutPTConfig);
}
- uiDummyProtFlags = uiProtFlags;
if (PMR_IsSparse(psPMR))
{
uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT,
&uiMMUProtFlags,
psMMUContext);
- PVR_GOTO_IF_ERROR(eError, e2);
+ PVR_GOTO_IF_ERROR(eError, ErrPutPTConfig);
- /* Callback to get device specific protection flags */
- if (psConfig->uiBytesPerEntry == 8)
- {
- uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
- }
- else
- {
- /* We've already validated possible values of uiBytesPerEntry at the start of this function */
- PVR_ASSERT(psConfig->uiBytesPerEntry == 4);
- uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
- }
+ /* We've already validated possible values of uiBytesPerEntry at the start of this function */
+ PVR_ASSERT(psConfig->uiBytesPerEntry == 4 || psConfig->uiBytesPerEntry == 8);
}
}
uiPgOffset * uiPageSize,
&sDevPAddr,
&bValid);
- PVR_GOTO_IF_ERROR(eError, e3);
+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages);
}
else
{
{
if (bZeroBacking)
{
+ eError = _MMU_GetBackingPage(psDevNode,
+ &psDevNode->sDevZeroPage,
+ PVR_ZERO_PAGE_INIT_VALUE,
+ DEV_ZERO_PAGE,
+ IMG_TRUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage",
+ ErrUnlockAndUnmapPages);
+
sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr;
/* Ensure the zero back page PTE is read only */
uiDefProtFlags = uiProtFlagsReadOnly;
}
else
{
+ eError = _MMU_GetBackingPage(psDevNode,
+ &psDevNode->sDummyPage,
+ PVR_DUMMY_PAGE_INIT_VALUE,
+ DUMMY_PAGE,
+ IMG_TRUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_MMU_GetBackingPage",
+ ErrUnlockAndUnmapPages);
+
sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr;
}
}
PVR_ASSERT(ui32BitLength <= i32FeatureVal);
eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto e3;
+ goto ErrUnlockAndUnmapPages;
}
} while (0);
}
&psPrevLevel->sMemDesc.psMapping->sMemHandle,
uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
(uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
- PVR_GOTO_IF_ERROR(eError, e3);
+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages);
}
uiFlushStart = uiPTEIndex;
(bValid)?uiSymbolicAddrOffset:0,
#endif /*PDUMP*/
uiDefProtFlags);
- PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", ErrUnlockAndUnmapPages);
if (bValid)
{
&psLevel->sMemDesc.psMapping->sMemHandle,
uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
(uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
- PVR_GOTO_IF_ERROR(eError, e3);
+ PVR_GOTO_IF_ERROR(eError, ErrUnlockAndUnmapPages);
}
OSLockRelease(psMMUContext->hLock);
return PVRSRV_OK;
-e3:
+ErrUnlockAndUnmapPages:
OSLockRelease(psMMUContext->hLock);
if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
uiLoop,
paui32MapIndices,
uiLog2HeapPageSize,
- PMR_IsSparse(psPMR));
-e2:
+ uiMappingFlags);
+ErrPutPTConfig:
_MMU_PutPTConfig(psMMUContext, hPriv);
-e1:
+ErrFreeValidArray:
if (psDevPAddr != asDevPAddr)
{
OSFreeMem(pbValid);
+ }
+ErrFreePAddrMappingArray:
+ if (psDevPAddr != asDevPAddr)
+ {
OSFreeMem(psDevPAddr);
}
-e0:
+ErrReturnError:
return eError;
}
return;
}
-/*
- MMU_ChangeValidity
- */
-PVRSRV_ERROR
-MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiNumPages,
- IMG_UINT32 uiLog2PageSize,
- IMG_BOOL bMakeValid,
- PMR *psPMR)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- IMG_HANDLE hPriv;
- const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
- const MMU_PxE_CONFIG *psConfig;
- MMU_Levelx_INFO *psLevel = NULL;
- IMG_UINT32 uiFlushStart = 0;
- IMG_UINT32 uiPTIndex = 0;
- IMG_UINT32 i;
- IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
- IMG_BOOL bValid;
-
- PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
-
-#if defined(PDUMP)
- IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
- IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
- IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
- IMG_DEVMEM_OFFSET_T uiNextSymName;
-
- PDUMPCOMMENT(psDevNode,
- "Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")",
- bMakeValid,
- sDevVAddr.uiAddr,
- sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1);
-#endif /*PDUMP*/
-
- /* We should verify the size and contiguity when supporting variable page size */
- PVR_ASSERT (psMMUContext != NULL);
- PVR_ASSERT (psPMR != NULL);
-
- /* Get general PT and address configs */
- _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
- &psConfig, &hPriv, &psDevVAddrConfig);
-
- _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
- &psLevel, &uiPTIndex);
- uiFlushStart = uiPTIndex;
-
- /* Do a page table walk and change attribute for every page in range. */
- for (i=0; i < uiNumPages;)
- {
- /* Set the entry */
- if (bMakeValid)
- {
- /* Only set valid if physical address exists (sparse allocs might have none)*/
- eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, (IMG_DEVMEM_OFFSET_T) i << uiLog2PageSize, &bValid);
- PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e_exit);
-
- if (bValid)
- {
- if (psConfig->uiBytesPerEntry == 8)
- {
- ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
- }
- else if (psConfig->uiBytesPerEntry == 4)
- {
- ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
- }
- else
- {
- PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
- }
- }
- }
- else
- {
- if (psConfig->uiBytesPerEntry == 8)
- {
- ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
- }
- else if (psConfig->uiBytesPerEntry == 4)
- {
- ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
- }
- else
- {
- PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
- }
- }
-
-#if defined(PDUMP)
-
- PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
- sizeof(aszMemspaceName), &aszMemspaceName[0],
- sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
- &uiSymbolicAddrOffset,
- &uiNextSymName);
-
- PDumpMMUDumpPxEntries(psDevNode,
- MMU_LEVEL_1,
- psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
- psLevel->sMemDesc.pvCpuVAddr,
- psLevel->sMemDesc.sDevPAddr,
- uiPTIndex,
- 1,
- aszMemspaceName,
- aszSymbolicAddress,
- uiSymbolicAddrOffset,
- psConfig->uiBytesPerEntry,
- psConfig->uiAddrLog2Align,
- psConfig->uiAddrShift,
- psConfig->uiAddrMask,
- psConfig->uiProtMask,
- psConfig->uiValidEnMask,
- 0,
- psMMUContext->psDevAttrs->eMMUType);
-#endif /*PDUMP*/
-
- sDevVAddr.uiAddr += uiPageSize;
- i++;
-
- /* Calculate PT index and get new table descriptor */
- if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
- {
- uiPTIndex++;
- }
- else
- {
-
- eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
- &psLevel->sMemDesc.psMapping->sMemHandle,
- uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
- (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
- PVR_GOTO_IF_ERROR(eError, e_exit);
-
- _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
- &psLevel, &uiPTIndex);
- uiFlushStart = uiPTIndex;
- }
- }
-
-e_exit:
-
- _MMU_PutPTConfig(psMMUContext, hPriv);
-
- /* Flush TLB for PTs*/
- psDevNode->pfnMMUCacheInvalidate(psDevNode,
- psMMUContext,
- MMU_LEVEL_1,
- !bMakeValid);
-
- PVR_ASSERT(eError == PVRSRV_OK);
- return eError;
-}
-
-
/*
MMU_AcquireBaseAddr
*/
return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags);
}
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/*
MMU_GetOSids
*/
return PVRSRV_OK;
}
-/**************************************************************************
- * Function Name : PDumpMalloc
- * Inputs :
- * Outputs :
- * Returns : PVRSRV_ERROR
- * Description :
- **************************************************************************/
+/*************************************************************************/ /*!
+@Function PDumpMalloc
+@Description Builds and writes an allocation command to pdump output. Whilst
+ writing the thread is locked.
+
+@Input psDeviceNode A pointer to a device node.
+@Input pszDevSpace Device space string.
+@Input pszSymbolicAddress Name of the allocation.
+@Input ui64Size String size.
+@Input uiAlign Command alignment.
+@Input bInitialise Should the command initialise the allocation.
+@Input ui8InitValue The value memory is initialised to.
+@Input phHandlePtr PDump allocation handle.
+@Input ui32PDumpFlags PDump allocation flags.
+
+@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success.
+*/ /**************************************************************************/
PVRSRV_ERROR PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
const IMG_CHAR *pszDevSpace,
const IMG_CHAR *pszSymbolicAddress,
IMG_UINT64 ui64Size,
IMG_DEVMEM_ALIGN_T uiAlign,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phHandlePtr,
IMG_UINT32 ui32PDumpFlags)
{
PVRSRV_ERROR eError = PVRSRV_OK;
+ PDUMP_LOCK(ui32PDumpFlags);
+ eError = PDumpMallocUnlocked(psDeviceNode,
+ pszDevSpace,
+ pszSymbolicAddress,
+ ui64Size,
+ uiAlign,
+ bInitialise,
+ ui8InitValue,
+ phHandlePtr,
+ ui32PDumpFlags);
+ PDUMP_UNLOCK(ui32PDumpFlags);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PDumpMallocUnlocked
+@Description Builds and writes an allocation command to pdump output. Whilst
+ writing the thread remains unlocked.
+
+@Input psDeviceNode A pointer to a device node.
+@Input pszDevSpace Device space string.
+@Input pszSymbolicAddress Name of the allocation.
+@Input ui64Size String size.
+@Input uiAlign Command alignment.
+@Input bInitialise Should the command initialise the allocation.
+@Input ui8InitValue The value memory is initialised to.
+@Input phHandlePtr PDump allocation handle.
+@Input ui32PDumpFlags PDump allocation flags.
+
+@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT8 ui8InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
PDUMP_GET_SCRIPT_STRING()
psPDumpAllocationInfo->aszSymbolicAddress,
ui64Size,
uiAlign,
- ui32InitValue);
+ ui8InitValue);
}
else
{
goto _return;
}
- PDUMP_LOCK(ui32PDumpFlags);
PDumpWriteScript(psDeviceNode, hScript, ui32PDumpFlags);
- PDUMP_UNLOCK(ui32PDumpFlags);
psPDumpAllocationInfo->ui64Size = ui64Size;
psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
return eError;
}
+/*************************************************************************/ /*!
+@Function PDumpFree
+@Description Writes a FREE command for an allocation handle to the pdump out2
+ stream. When writing to the output stream the thread is locked.
-/**************************************************************************
- * Function Name : PDumpFree
- * Inputs :
- * Outputs :
- * Returns : PVRSRV_ERROR
- * Description :
- **************************************************************************/
+@Input psDeviceNode A pointer to a device node.
+@Input hPDumpAllocationInfoHandle A PDump allocation handle.
+
+@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success.
+*/ /**************************************************************************/
PVRSRV_ERROR PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_HANDLE hPDumpAllocationInfoHandle)
{
PVRSRV_ERROR eError = PVRSRV_OK;
- IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+ PDUMP_LOCK(PDUMP_FLAGS_NONE);
+ eError = PDumpFreeUnlocked(psDeviceNode, hPDumpAllocationInfoHandle);
+ PDUMP_UNLOCK(PDUMP_FLAGS_NONE);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PDumpFreeUnlocked
+@Description Writes a FREE command for an allocation handle to the pdump
+ out2 stream. When writing to the output stream the thread
+ remains unlocked.
+
+@Input psDeviceNode A pointer to a device node.
+@Input hPDumpAllocationInfoHandle A PDump allocation handle.
+
+@Return This function returns a PVRSRV_ERROR. PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
PDUMP_GET_SCRIPT_STRING()
psPDumpAllocationInfo->aszSymbolicAddress);
PVR_GOTO_IF_ERROR(eError, _return);
- PDUMP_LOCK(ui32Flags);
PDumpWriteScript(psDeviceNode, hScript, ui32Flags);
OSFreeMem(psPDumpAllocationInfo);
- PDUMP_UNLOCK(ui32Flags);
_return:
PDUMP_RELEASE_SCRIPT_STRING();
IMG_CHAR szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*< PRM file name where the zero page was pdumped */
} PDUMP_PARAMETERS;
+/* PDump global connection count - used to determine when/if the last
+ * connection (from a PDump generating app) has been closed.
+ * This is used to key the AUTO_TERMINATED behaviour if enabled.
+ */
+static ATOMIC_T gPDumpNumConnex;
+
/* PDump lock to keep pdump write atomic.
* Which will protect g_PDumpScript & g_PDumpParameters pdump
* specific shared variable.
#define FLAG_IS_DRIVER_IN_INIT_PHASE 0x1 /*! Control flag that keeps track of State of driver initialisation phase */
#define FLAG_IS_IN_CAPTURE_RANGE 0x2 /*! Control flag that keeps track of Current capture status, is current frame in range */
#define FLAG_IS_IN_CAPTURE_INTERVAL 0x4 /*! Control flag that keeps track of Current capture status, is current frame in an interval where no capture takes place. */
+#define FLAG_IS_AUTO_TERMINATED 0x8 /*! Control flag that indicates app has auto-terminated. */
#define CHECK_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_HAS(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
#define SET_PDUMP_CONTROL_FLAG(PDUMP_CONTROL_FLAG) BITMASK_SET(g_PDumpCtrl.ui32Flags, PDUMP_CONTROL_FLAG)
POS_LOCK hLock; /*!< Exclusive lock to this structure */
IMG_PID InPowerTransitionPID;/*!< pid of thread requesting power transition */
+ IMG_UINT32 ui32TimeoutFrequency;/*!< Timer frequency for checking process existence */
} PDUMP_CTRL_STATE;
static PDUMP_CTRL_STATE g_PDumpCtrl =
},
NULL,
+ 0,
0
};
+static IMG_HANDLE g_PDumpTimerID;
+
static void PDumpAssertWriteLockHeld(void);
#if defined(PDUMP_SPLIT_64BIT_REGISTER_ACCESS)
return g_PDumpCtrl.eServiceState;
}
+/* Validate PDump flags supplied from user mode. Only PDUMP_BLKDATA and
+ * PDUMP_CONT are permitted; any other bit set makes the flags invalid. */
+PVRSRV_ERROR PDumpValidateUMFlags(PDUMP_FLAGS_T uiFlags)
+{
+ /* Reject any bit outside the permitted BLKDATA/CONT set */
+ return ((uiFlags & (~(PDUMP_BLKDATA | PDUMP_CONT))) != 0) ? PVRSRV_ERROR_INVALID_PARAMS : PVRSRV_OK;
+}
+
PVRSRV_ERROR PDumpReady(void)
{
switch (PDumpCtrlGetModuleState())
#endif
}
-static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval, IMG_UINT32 ui32AutoTermTimeout)
{
/* Set the capture range to that supplied by the PDump client tool
*/
g_PDumpCtrl.sCaptureRange.ui32End = ui32End;
g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval;
+ /* Disable / Enable AUTO_TERMINATE behaviour if AutoTermTimeout is set */
+ if (ui32AutoTermTimeout == 0U)
+ {
+ if (g_PDumpTimerID != NULL)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = OSDisableTimer(g_PDumpTimerID);
+ PVR_LOG_IF_ERROR(eError, "OSDisableTimer");
+
+ /* Now destroy it too */
+ eError = OSRemoveTimer(g_PDumpTimerID);
+ PVR_LOG_IF_ERROR(eError, "OSRemoveTimer");
+
+ g_PDumpTimerID = NULL;
+ }
+ g_PDumpCtrl.ui32TimeoutFrequency = 0U;
+ UNSET_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED);
+ }
+
/* Set pdump block mode ctrl variables */
g_PDumpCtrl.sBlockCtrl.ui32BlockLength = (ui32Mode == PDUMP_CAPMODE_BLOCKED)? ui32Interval : 0; /* ui32Interval is interpreted as block length */
g_PDumpCtrl.sBlockCtrl.ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID;
*ui64State |= PDUMP_STATE_SUSPENDED;
}
+ if (CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED))
+ {
+ *ui64State |= PDUMP_STATE_APP_TERMINATED;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* Enable (non-zero ui32TimeoutFrequency) or disable (zero) the PDump
+ * AUTO_TERMINATED behaviour. The value is stored as the timer frequency
+ * used for checking process existence (see g_PDumpCtrl.ui32TimeoutFrequency).
+ * Fails early if PDump is not in a READY state.
+ * NOTE(review): the caller appears to invoke this with the PDump ctrl lock
+ * held (see PDumpCtrlLockAcquire at the call site) - confirm that is the
+ * intended locking contract. */
+static PVRSRV_ERROR PDumpSetAutoTerminate(IMG_UINT32 ui32TimeoutFrequency)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bEnable = (ui32TimeoutFrequency != 0U) ? IMG_TRUE : IMG_FALSE;
+
+ eError = PDumpReady();
+ PVR_LOG_RETURN_IF_ERROR(eError, "PDumpReady");
+
+ g_PDumpCtrl.ui32TimeoutFrequency = ui32TimeoutFrequency;
+
+ if (bEnable)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: ENABLING Auto Termination - Timeout %u",
+ __func__, ui32TimeoutFrequency));
+ PDUMP_REFCOUNT_PRINT(("%s: gPDumpNumConnex (%p) = %d", __func__,
+ &gPDumpNumConnex, OSAtomicRead(&gPDumpNumConnex)));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: DISABLING Auto Termination",
+ __func__));
+ PDUMP_REFCOUNT_PRINT(("%s: gPDumpNumConnex (%p) = %d", __func__,
+ &gPDumpNumConnex,
+ OSAtomicRead(&gPDumpNumConnex)));
+ }
+
 return PVRSRV_OK;
}
if (psDeviceNode)
{
+ if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_CREATED)
+ {
+ PVR_DPF((PVR_DBG_FATAL,"%s: PDump output requested for Device %d "
+ "before device created. Not permitted - please fix driver.",
+ __func__, psDeviceNode->sDevId.ui32InternalID));
+ return IMG_FALSE;
+ }
if ((psDeviceNode->sDevId.ui32InternalID > PVRSRV_MAX_DEVICES) ||
((psPVRSRVData->ui32PDumpBoundDevice < PVRSRV_MAX_DEVICES) &&
(psDeviceNode->sDevId.ui32InternalID != psPVRSRVData->ui32PDumpBoundDevice)))
IMG_UINT32 ui32Off = 0;
IMG_BYTE *pbyDataBuffer;
IMG_UINT32 ui32BytesAvailable = 0;
- static IMG_UINT32 ui32TotalBytesWritten;
PVRSRV_ERROR eError;
IMG_UINT32 uiRetries = 0;
if (eError == PVRSRV_OK)
{
- ui32TotalBytesWritten += ui32BytesToBeWritten;
-
PVR_ASSERT(pbyDataBuffer != NULL);
OSDeviceMemCopy((void*)pbyDataBuffer, pui8Data + ui32Off, ui32BytesToBeWritten);
IMG_UINT32 ui32Start,
IMG_UINT32 ui32End,
IMG_UINT32 ui32Interval,
- IMG_UINT32 ui32MaxParamFileSize)
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout)
{
PVRSRV_ERROR eError;
PDumping app may be reading the state data for some checks
*/
PDumpCtrlLockAcquire();
- PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+ PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval, ui32AutoTermTimeout);
+ PDumpSetAutoTerminate(ui32AutoTermTimeout);
PDumpCtrlLockRelease();
if (ui32MaxParamFileSize == 0)
* The render data may be corrupted, so write out the raw
* image buffer to avoid errors in the post-processing tools.
*/
- bRawImageData |= (psDevInfo->ui32ValidationFlags & RGX_VAL_SIG_CHECK_ERR_EN);
+ bRawImageData |= (psDevInfo->ui32ValidationFlags & RGX_VAL_FBDC_SIG_CHECK_ERR_EN);
}
#endif
* Will set module state back to READY.
*/
eErr = PDumpSetDefaultCaptureParamsKM(NULL, psDeviceNode, PDUMP_CAPMODE_UNSET,
- PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0);
+ PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 0, 0, 0);
PVR_LOG_IF_ERROR(eErr, "PDumpSetDefaultCaptureParamsKM");
}
}
}
#endif /* defined(PDUMP_TRACE_STATE) || defined(PVR_TESTING_UTILS) */
+static void PDumpStartTimer(PVRSRV_DEVICE_NODE *psDeviceNode);
-PVRSRV_ERROR PDumpRegisterConnection(void *hSyncPrivData,
+PVRSRV_ERROR PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ void *hSyncPrivData,
PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
{
*ppsPDumpConnectionData = psPDumpConnectionData;
+ if (PDumpIsDevicePermitted(psDeviceNode))
+ {
+ IMG_INT iRefCount;
+
+ /* Add this new reference to the global count of active connections */
+ iRefCount = OSAtomicIncrement(&gPDumpNumConnex);
+ PDUMP_REFCOUNT_PRINT("%s: gPDumpNumConnex (%p) = %d", __func__,
+ &gPDumpNumConnex, iRefCount);
+
+ if (((iRefCount > 1) &&
+ (g_PDumpCtrl.ui32TimeoutFrequency != 0U)) &&
+ (g_PDumpTimerID == NULL))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Starting Timeout Chain now, refcnt = %d",
+ __func__, iRefCount));
+ PDumpStartTimer(psDeviceNode);
+ }
+ }
+
return PVRSRV_OK;
fail_lockcreate:
return eError;
}
-void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+void PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData)
{
+ IMG_INT iRefCount;
+
_PDumpConnectionRelease(psPDumpConnectionData);
+ if (PDumpIsDevicePermitted(psDeviceNode))
+ {
+ /* Remove this connection from the global count */
+ iRefCount = OSAtomicDecrement(&gPDumpNumConnex);
+ PDUMP_REFCOUNT_PRINT("%s: gPDumpNumConnex (%p) = %d", __func__,
+ &gPDumpNumConnex, iRefCount);
+ PVR_ASSERT(iRefCount >= 0);
+ }
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
}
return PVRSRV_OK;
}
+/* Prototype ahead of the definition: PDumpTimerCB has external linkage and
+ * is registered with OSAddTimer() by PDumpStartTimer() below. */
+void PDumpTimerCB(void *pvData);
+/* Timer callback: reads the global PDump connection count and, once only a
+ * single connection remains (in a quiescent state that is the pdump utility
+ * itself), flags the capture as auto-terminated. pvData is unused. */
+void PDumpTimerCB(void *pvData)
+{
+ IMG_INT iRefCount;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (CHECK_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED))
+ {
+ /* Already flagged; log only. NOTE(review): execution deliberately falls
+ * through and may set the flag again below - benign, as re-setting an
+ * already-set flag is a no-op. */
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Already flagged as TERMINATED",
+ __func__));
+ }
+
+ /* Simply check to see if the global connection count indicates that all
+ * subsequent applications requiring PDump logging have terminated.
+ * In a quiescent state we will have a singleton pdump utility still
+ * connected.
+ */
+ if ((iRefCount = OSAtomicRead(&gPDumpNumConnex)) == 1)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: No connections active (%d), flagging as AUTO_TERMINATED",
+ __func__, iRefCount));
+ SET_PDUMP_CONTROL_FLAG(FLAG_IS_AUTO_TERMINATED);
+ }
+}
+
+/*
+ * FunctionName PDumpStartTimer
+ * Description Start an OSTimer chain running to scan for active connections
+ * being present in the connections associated with the given
+ * psDeviceNode. Only started if we have AutoTerminate flagged
+ * in the internal PDump state.
+ * Inputs psDeviceNode associated device node to scan for connections
+ * Returns nothing
+ */
+static void PDumpStartTimer(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ /* Only the PDump-bound device may drive the auto-terminate timer. */
+ if (!PDumpIsDevicePermitted(psDeviceNode))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: DeviceID %u not valid", __func__,
+ psDeviceNode->sDevId.ui32InternalID));
+ return;
+ }
+
+ /* A timeout frequency of 0 means auto-terminate is disabled. */
+ if (g_PDumpCtrl.ui32TimeoutFrequency == 0U)
+ {
+ return;
+ }
+
+ /* Restart semantics: tear down any previously armed timer first so only
+ * one timer chain ever exists. */
+ if (g_PDumpTimerID != NULL)
+ {
+ eError = OSDisableTimer(g_PDumpTimerID);
+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSDisableTimer");
+ eError = OSRemoveTimer(g_PDumpTimerID);
+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSRemoveTimer");
+ }
+
+ /* NOTE(review): ui32TimeoutFrequency appears to be in seconds and is
+ * converted to milliseconds here - confirm against OSAddTimer's contract. */
+ g_PDumpTimerID = OSAddTimer(PDumpTimerCB, NULL, g_PDumpCtrl.ui32TimeoutFrequency * 1000U);
+
+ if (g_PDumpTimerID != NULL)
+ {
+ eError = OSEnableTimer(g_PDumpTimerID);
+
+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEnableTimer");
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Timer %p now active.", __func__,
+ g_PDumpTimerID));
+ }
+}
+
#endif /* defined(PDUMP) */
#include "physmem_hostmem.h"
#include "physmem_lma.h"
#include "physmem_osmem.h"
+#include "debug_common.h"
struct _PHYS_HEAP_
{
/*! The type of this heap */
PHYS_HEAP_TYPE eType;
+
+ /*! The allocation policy for this heap */
+ PHYS_HEAP_POLICY uiPolicy;
+
/* Config flags */
PHYS_HEAP_USAGE_FLAGS ui32UsageFlags;
struct _PHYS_HEAP_ *psNext;
};
-static PHYS_HEAP *g_psPhysHeapList;
-static POS_LOCK g_hPhysHeapLock;
-
#if defined(REFCOUNT_DEBUG)
#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \
PVRSRVDebugPrintf(PVR_DBG_WARNING, \
#endif
+/* Action the PVR layer takes for a PVRSRV_PHYS_HEAP entry during device heap
+ * initialisation (consumed via gasHeapProperties). */
+typedef enum _PVR_LAYER_HEAP_ACTION_
+{
+ PVR_LAYER_HEAP_ACTION_IGNORE, /* skip heap during heap init */
+ PVR_LAYER_HEAP_ACTION_INSTANTIATE, /* instantiate heap but don't acquire */
+ PVR_LAYER_HEAP_ACTION_INITIALISE /* instantiate and acquire */
+
+} PVR_LAYER_HEAP_ACTION;
typedef struct PHYS_HEAP_PROPERTIES_TAG
{
PVRSRV_PHYS_HEAP eFallbackHeap;
- IMG_BOOL bPVRLayerAcquire;
+ PVR_LAYER_HEAP_ACTION ePVRLayerAction;
IMG_BOOL bUserModeAlloc;
} PHYS_HEAP_PROPERTIES;
*/
static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] =
{
- /* eFallbackHeap, bPVRLayerAcquire, bUserModeAlloc */
- { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* DEFAULT */
- { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_LOCAL */
- { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* CPU_LOCAL */
- { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_PRIVATE */
- { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_FALSE, IMG_FALSE }, /* FW_MAIN */
- { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* EXTERNAL */
- { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* GPU_COHERENT */
- { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* GPU_SECURE */
- { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CONFIG */
- { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CODE */
- { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_DATA */
- { PVRSRV_PHYS_HEAP_FW_PREMAP0, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP0 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP1, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP1 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP2, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP2 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP3, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP3 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP4, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP4 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP5, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP5 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP6, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP6 */
- { PVRSRV_PHYS_HEAP_FW_PREMAP7, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP7 */
+ /* eFallbackHeap, ePVRLayerAction, bUserModeAlloc */
+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* DEFAULT */
+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_LOCAL */
+ { PVRSRV_PHYS_HEAP_DEFAULT, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* CPU_LOCAL */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_PRIVATE */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_MAIN */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* EXTERNAL */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_FALSE }, /* GPU_COHERENT */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_INITIALISE, IMG_TRUE }, /* GPU_SECURE */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CONFIG */
+ { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_CODE */
+ { PVRSRV_PHYS_HEAP_FW_MAIN, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PRIV_DATA */
+ { PVRSRV_PHYS_HEAP_GPU_LOCAL, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP_PT */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP0, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP0 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP1, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP1 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP2, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP2 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP3, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP3 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP4, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP4 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP5, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP5 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP6, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP6 */
+ { PVRSRV_PHYS_HEAP_FW_PREMAP7, PVR_LAYER_HEAP_ACTION_IGNORE, IMG_FALSE }, /* FW_PREMAP7 */
+ { PVRSRV_PHYS_HEAP_WRAP, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* WRAP */
+ { PVRSRV_PHYS_HEAP_DISPLAY, PVR_LAYER_HEAP_ACTION_INSTANTIATE, IMG_FALSE }, /* DISPLAY */
};
static_assert((ARRAY_SIZE(gasHeapProperties) == PVRSRV_PHYS_HEAP_LAST),
"Size or order of gasHeapProperties entries incorrect for PVRSRV_PHYS_HEAP enum");
-void PVRSRVGetDevicePhysHeapCount(PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 *pui32PhysHeapCount)
-{
- *pui32PhysHeapCount = psDevNode->ui32UserAllocHeapCount;
-}
+static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap);
+static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap);
static IMG_UINT32 PhysHeapOSGetPageShift(void)
{
.pfnGetPageShift = &PhysHeapOSGetPageShift,
};
+/**
+ * ! IMPORTANT !
+ * Do not change this string array unless the usage flag definitions in
+ * physheap_config.h have changed.
+ *
+ * NOTE: Use DebugCommonFlagStrings or GetPhysHeapUsageString to get
+ * usage flags string.
+ */
+/* Maps each PHYS_HEAP_USAGE_* flag bit to its human-readable name. */
+static const IMG_FLAGS2DESC g_asPhysHeapUsageFlagStrings[] =
+{
+ {PHYS_HEAP_USAGE_GPU_LOCAL, "GPU_LOCAL"},
+ {PHYS_HEAP_USAGE_CPU_LOCAL, "CPU_LOCAL"},
+ {PHYS_HEAP_USAGE_GPU_PRIVATE, "GPU_PRIVATE"},
+ {PHYS_HEAP_USAGE_EXTERNAL, "EXTERNAL"},
+ {PHYS_HEAP_USAGE_GPU_COHERENT, "GPU_COHERENT"},
+ {PHYS_HEAP_USAGE_GPU_SECURE, "GPU_SECURE"},
+ {PHYS_HEAP_USAGE_FW_SHARED, "FW_SHARED"},
+ {PHYS_HEAP_USAGE_FW_PRIVATE, "FW_PRIVATE"},
+ {PHYS_HEAP_USAGE_FW_CODE, "FW_CODE"},
+ {PHYS_HEAP_USAGE_FW_PRIV_DATA, "FW_PRIV_DATA"},
+ {PHYS_HEAP_USAGE_FW_PREMAP_PT, "FW_PREMAP_PT"},
+ {PHYS_HEAP_USAGE_FW_PREMAP, "FW_PREMAP"},
+ {PHYS_HEAP_USAGE_WRAP, "WRAP"},
+ {PHYS_HEAP_USAGE_DISPLAY, "DISPLAY"}
+};
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCheckValidUsageFlags
+@Description    Verifies that a usage bitfield contains no bits outside the
+                set defined by PHYS_HEAP_USAGE_MASK.
+
+@Input          ui32PhysHeapUsage  The usage flag bitfield to validate.
+
+@Return         IMG_TRUE when only valid bits are set, IMG_FALSE otherwise.
+*/ /**************************************************************************/
+static inline IMG_BOOL PhysHeapCheckValidUsageFlags(PHYS_HEAP_USAGE_FLAGS ui32PhysHeapUsage)
+{
+ PHYS_HEAP_USAGE_FLAGS uiUnknownBits = ui32PhysHeapUsage & ~PHYS_HEAP_USAGE_MASK;
+
+ return (uiUnknownBits == 0U) ? IMG_TRUE : IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function       GetPhysHeapUsageString
+@Description    This function is used to create a comma separated string of all
+                usage flags passed in as a bitfield.
+
+@Input          ui32UsageFlags The bitfield of usage flags.
+@Input          ui32Size       The size of the memory pointed to by
+                               pszUsageString.
+@Output         pszUsageString A pointer to memory where the created string
+                               will be stored.
+
+@Return         If successful PVRSRV_OK, else a PVRSRV_ERROR.
+*/ /**************************************************************************/
+static PVRSRV_ERROR GetPhysHeapUsageString(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags,
+                                           IMG_UINT32 ui32Size,
+                                           IMG_CHAR *const pszUsageString)
+{
+ IMG_UINT32 i;
+ IMG_BOOL bFirst = IMG_TRUE;
+ size_t uiSize = 0;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(pszUsageString != NULL, "pszUsageString");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Size > 0, "ui32Size");
+
+ /* Initialise the string to be null terminated at the beginning */
+ uiSize = OSStringLCopy(pszUsageString, "\0", sizeof(IMG_CHAR));
+
+ if (ui32UsageFlags == 0)
+ {
+ uiSize = OSStringLCopy(pszUsageString, "NONE", (size_t)ui32Size);
+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCopy", PVRSRV_ERROR_OUT_OF_MEMORY);
+
+ return PVRSRV_OK;
+ }
+
+ /* Process from the most significant bit down to bit 0.
+ * BIT(i - 1) is used rather than BIT(i): shifting by the full width of
+ * PHYS_HEAP_USAGE_FLAGS on the first iteration is undefined behaviour,
+ * and BIT(i) would also skip bit 0 entirely. */
+ for (i = sizeof(PHYS_HEAP_USAGE_FLAGS) * BITS_PER_BYTE; i > 0; i--)
+ {
+ IMG_UINT32 ui32Flag = BIT(i - 1U);
+
+ if (BITMASK_HAS(ui32UsageFlags, ui32Flag))
+ {
+ IMG_CHAR pszString[32] = "\0";
+
+ if (PhysHeapCheckValidUsageFlags(ui32Flag))
+ {
+ DebugCommonFlagStrings(pszString,
+ sizeof(pszString),
+ g_asPhysHeapUsageFlagStrings,
+ ARRAY_SIZE(g_asPhysHeapUsageFlagStrings),
+ ui32Flag);
+ }
+ else
+ {
+ /* Bound the concatenation by the local buffer's size, not the
+ * caller's ui32Size: passing the (potentially larger) caller
+ * bound here could overrun pszString[32]. */
+ uiSize = OSStringLCat(pszString,
+ "INVALID",
+ sizeof(pszString));
+ PVR_LOG_RETURN_IF_FALSE((uiSize < sizeof(pszString)), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+
+ /* Separate entries with ", " after the first one. */
+ if (!bFirst)
+ {
+ uiSize = OSStringLCat(pszUsageString,
+ ", ",
+ (size_t)ui32Size);
+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ else
+ {
+ bFirst = IMG_FALSE;
+ }
+
+ uiSize = OSStringLCat(pszUsageString,
+ pszString,
+ (size_t)ui32Size);
+ PVR_LOG_RETURN_IF_FALSE((uiSize < ui32Size), "OSStringLCat", PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       PhysHeapCreatePropertiesString
+@Description    This function is used to create a string containing properties
+                of the specified physheap.
+
+@Input          psPhysHeap        The physheap to create the string from.
+@Input          ui32Size          The size of the memory pointed to by
+                                  pszPhysHeapString.
+@Output         pszPhysHeapString A pointer to memory where the created string
+                                  will be stored.
+
+@Return         If successful PVRSRV_OK, else a PVRSRV_ERROR.
+*/ /**************************************************************************/
+static PVRSRV_ERROR PhysHeapCreatePropertiesString(PHYS_HEAP *psPhysHeap,
+                                                   IMG_UINT32 ui32Size,
+                                                   IMG_CHAR *pszPhysHeapString)
+{
+ /* Indexed directly by psPhysHeap->eType below; entries must stay in step
+ * with the PHYS_HEAP_TYPE enum. WRAP only exists when
+ * SUPPORT_WRAP_EXTMEMOBJECT is defined. */
+ static const IMG_CHAR *const pszTypeStrings[] = {
+ "UNKNOWN",
+ "UMA",
+ "LMA",
+ "DMA",
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+ "WRAP"
+#endif
+ };
+
+ IMG_UINT64 ui64TotalSize;
+ IMG_UINT64 ui64FreeSize;
+ IMG_CHAR pszUsageString[128] = "\0";
+ IMG_INT32 iCount;
+ PVRSRV_ERROR eError;
+
+ /* Range-check eType before using it to index pszTypeStrings. */
+ if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE",
+ psPhysHeap));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAPINFO, failure);
+ }
+
+ /* Query total/free byte counts from the heap's PMR factory. */
+ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+ &ui64TotalSize,
+ &ui64FreeSize);
+
+ eError = GetPhysHeapUsageString(psPhysHeap->ui32UsageFlags,
+ sizeof(pszUsageString),
+ pszUsageString);
+ PVR_LOG_GOTO_IF_ERROR(eError, "GetPhysHeapUsageString", failure);
+
+ if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA)
+ {
+ IMG_CPU_PHYADDR sCPUPAddr;
+ IMG_DEV_PHYADDR sGPUPAddr;
+
+ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL);
+ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL);
+
+ /* On an address-query failure, log it and print an all-Fs address
+ * rather than failing the whole properties dump. */
+ eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
+ &sCPUPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "pfnGetCPUPAddr");
+ sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX);
+ }
+
+ eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData,
+ &sGPUPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "pfnGetDevPAddr");
+ sGPUPAddr.uiAddr = IMG_UINT64_MAX;
+ }
+
+ /* LMA heaps additionally report CPU/GPU physical base addresses. */
+ iCount = OSSNPrintf(pszPhysHeapString,
+ ui32Size,
+ "0x%p -> PdMs: %s, Type: %s, "
+ "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", "
+ "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", "
+ "Usage Flags: 0x%08x (%s), Refs: %d, "
+ "Free Size: %"IMG_UINT64_FMTSPEC"B, "
+ "Total Size: %"IMG_UINT64_FMTSPEC"B",
+ psPhysHeap,
+ psPhysHeap->pszPDumpMemspaceName,
+ pszTypeStrings[psPhysHeap->eType],
+ CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr),
+ sGPUPAddr.uiAddr,
+ psPhysHeap->ui32UsageFlags,
+ pszUsageString,
+ psPhysHeap->ui32RefCount,
+ ui64FreeSize,
+ ui64TotalSize);
+ }
+ else
+ {
+ iCount = OSSNPrintf(pszPhysHeapString,
+ ui32Size,
+ "0x%p -> PdMs: %s, Type: %s, "
+ "Usage Flags: 0x%08x (%s), Refs: %d, "
+ "Free Size: %"IMG_UINT64_FMTSPEC"B, "
+ "Total Size: %"IMG_UINT64_FMTSPEC"B",
+ psPhysHeap,
+ psPhysHeap->pszPDumpMemspaceName,
+ pszTypeStrings[psPhysHeap->eType],
+ psPhysHeap->ui32UsageFlags,
+ pszUsageString,
+ psPhysHeap->ui32RefCount,
+ ui64FreeSize,
+ ui64TotalSize);
+ }
+
+ /* Success only when OSSNPrintf neither errored nor truncated. */
+ if (0 < iCount && iCount < (IMG_INT32)ui32Size)
+ {
+ return PVRSRV_OK;
+ }
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+failure:
+ /* Hand back an empty string on any failure path. */
+ OSStringLCopy(pszPhysHeapString, "\0", ui32Size);
+ return eError;
+}
+
/*************************************************************************/ /*!
-@Function _PhysHeapDebugRequest
+@Function PhysHeapDebugRequest
@Description This function is used to output debug information for a given
device's PhysHeaps.
@Input pfnDbgRequestHandle Data required by this function that is
the print function if required.
@Return void
*/ /**************************************************************************/
-static void _PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle,
+static void PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle,
IMG_UINT32 ui32VerbLevel,
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile)
{
- static const IMG_CHAR *const pszTypeStrings[] = {
- "UNKNOWN",
- "UMA",
- "LMA",
- "DMA",
-#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
- "WRAP"
-#endif
- };
-
PPVRSRV_DEVICE_NODE psDeviceNode = (PPVRSRV_DEVICE_NODE)pfnDbgRequestHandle;
- PHYS_HEAP *psPhysHeap = NULL;
- IMG_UINT64 ui64TotalSize;
- IMG_UINT64 ui64FreeSize;
- IMG_UINT32 i;
+ PHYS_HEAP *psPhysHeap;
PVR_LOG_RETURN_VOID_IF_FALSE(psDeviceNode != NULL,
"Phys Heap debug request failed. psDeviceNode was NULL");
PVR_DUMPDEBUG_LOG("------[ Device ID: %d - Phys Heaps ]------",
- psDeviceNode->sDevId.i32OsDeviceID);
+ psDeviceNode->sDevId.i32KernelDeviceID);
- for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+ for (psPhysHeap = psDeviceNode->psPhysHeapList; psPhysHeap != NULL; psPhysHeap = psPhysHeap->psNext)
{
- psPhysHeap = psDeviceNode->papsRegisteredPhysHeaps[i];
+ IMG_CHAR pszPhysHeapString[256] = "\0";
+ PVRSRV_ERROR eError = PVRSRV_OK;
- if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings))
+ eError = PhysHeapCreatePropertiesString(psPhysHeap,
+ sizeof(pszPhysHeapString),
+ pszPhysHeapString);
+ if (eError != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR,
- "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE",
- psPhysHeap));
- break;
+ PVR_LOG_ERROR(eError, "PhysHeapCreatePropertiesString");
+ continue;
}
- psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
- &ui64TotalSize,
- &ui64FreeSize);
+ PVR_DUMPDEBUG_LOG("%s", pszPhysHeapString);
+ }
+}
- if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA)
+/*************************************************************************/ /*!
+@Function HeapCfgUsedByPVRLayer
+@Description Checks if a physheap config must be handled by the PVR Layer
+@Input psConfig PhysHeapConfig
+@Return IMG_BOOL
+*/ /**************************************************************************/
+static IMG_BOOL HeapCfgUsedByPVRLayer(PHYS_HEAP_CONFIG *psConfig)
+{
+ PVRSRV_PHYS_HEAP eHeap;
+ IMG_BOOL bPVRHeap = IMG_FALSE;
+
+ /* Heaps are triaged for initialisation by either
+ * the PVR Layer or the device-specific heap handler. */
+ for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT;
+ eHeap < PVRSRV_PHYS_HEAP_LAST;
+ eHeap++)
+ {
+ if (BIT_ISSET(psConfig->ui32UsageFlags, eHeap) &&
+ PhysHeapCreatedByPVRLayer(eHeap))
{
- IMG_CPU_PHYADDR sCPUPAddr;
- IMG_DEV_PHYADDR sGPUPAddr;
- PVRSRV_ERROR eError;
+ bPVRHeap = IMG_TRUE;
+ break;
+ }
+ }
- PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL);
- PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL);
+ return bPVRHeap;
+}
- eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
- &sCPUPAddr);
- if (eError != PVRSRV_OK)
- {
- PVR_LOG_ERROR(eError, "pfnGetCPUPAddr");
- sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX);
- }
+/*************************************************************************/ /*!
+@Function PhysHeapCreateDeviceHeapsFromConfigs
+@Description Create new heaps for a device from configs.
+@Input psDevNode Pointer to device node struct
+@Input pasConfigs Pointer to array of Heap configurations.
+@Input ui32NumConfigs Number of configurations in array.
+@Return PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode,
+ PHYS_HEAP_CONFIG *pasConfigs,
+ IMG_UINT32 ui32NumConfigs)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
- eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData,
- &sGPUPAddr);
- if (eError != PVRSRV_OK)
- {
- PVR_LOG_ERROR(eError, "pfnGetDevPAddr");
- sGPUPAddr.uiAddr = IMG_UINT64_MAX;
- }
+ psDevNode->psPhysHeapList = NULL;
- PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, "
-
- "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", "
- "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", "
- "Usage Flags: 0x%08x, Refs: %d, "
- "Free Size: %"IMG_UINT64_FMTSPEC", "
- "Total Size: %"IMG_UINT64_FMTSPEC,
- psPhysHeap,
- psPhysHeap->pszPDumpMemspaceName,
- pszTypeStrings[psPhysHeap->eType],
- CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr),
- sGPUPAddr.uiAddr,
- psPhysHeap->ui32UsageFlags,
- psPhysHeap->ui32RefCount,
- ui64FreeSize,
- ui64TotalSize);
- }
- else
+ for (i = 0; i < ui32NumConfigs; i++)
+ {
+ /* A PhysHeapConfig can have multiple usage flags. If any flag in a
+ * heap's set points to a heap type that is handled by the PVR Layer
+ * then we assume that a single heap is shared between multiple
+ * allocators and it is safe to instantiate it here. If the heap
+ * is not marked to be initialised by the PVR Layer, leave it
+ * to the device specific handler. */
+ if (HeapCfgUsedByPVRLayer(&pasConfigs[i]))
{
- PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, "
- "Usage Flags: 0x%08x, Refs: %d, "
- "Free Size: %"IMG_UINT64_FMTSPEC", "
- "Total Size: %"IMG_UINT64_FMTSPEC,
- psPhysHeap,
- psPhysHeap->pszPDumpMemspaceName,
- pszTypeStrings[psPhysHeap->eType],
- psPhysHeap->ui32UsageFlags,
- psPhysHeap->ui32RefCount,
- ui64FreeSize,
- ui64TotalSize);
+ eError = PhysHeapCreateHeapFromConfig(psDevNode, &pasConfigs[i], NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
}
}
+
+#if defined(SUPPORT_PHYSMEM_TEST)
+ /* For a temporary device node there will never be a debug dump
+ * request targeting it */
+ if (psDevNode->hDebugTable != NULL)
+#endif
+ {
+ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify,
+ psDevNode,
+ PhysHeapDebugRequest,
+ DEBUG_REQUEST_SYS,
+ psDevNode);
+
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify");
+ }
+ return PVRSRV_OK;
}
PVRSRV_ERROR
#endif
)
{
- eResult = PhysHeapCreate(psDevNode, psConfig, NULL,
- &_sPHEAPImplFuncs, ppsPhysHeap);
+ eResult = PhysHeapCreate(psDevNode, psConfig, PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG,
+ NULL, &_sPHEAPImplFuncs, ppsPhysHeap);
}
- else if (psConfig->eType == PHYS_HEAP_TYPE_LMA ||
- psConfig->eType == PHYS_HEAP_TYPE_DMA)
+ else if ((psConfig->eType == PHYS_HEAP_TYPE_LMA) ||
+ (psConfig->eType == PHYS_HEAP_TYPE_DMA))
{
- eResult = PhysmemCreateHeapLMA(psDevNode, psConfig, "GPU LMA (Sys)", ppsPhysHeap);
+ PHYS_HEAP_POLICY uiHeapPolicy;
+
+ if (psDevNode->pfnPhysHeapGetLMAPolicy != NULL)
+ {
+ uiHeapPolicy = psDevNode->pfnPhysHeapGetLMAPolicy(psConfig->ui32UsageFlags);
+ }
+ else
+ {
+ uiHeapPolicy = OSIsMapPhysNonContigSupported() ?
+ PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG :
+ PHYS_HEAP_POLICY_DEFAULT;
+ }
+
+ eResult = PhysmemCreateHeapLMA(psDevNode,
+ uiHeapPolicy,
+ psConfig,
+ (psConfig->eType == PHYS_HEAP_TYPE_LMA) ?
+ "GPU LMA (Sys)" :
+ "GPU LMA DMA (Sys)",
+ ppsPhysHeap);
}
else
{
return eResult;
}
-PVRSRV_ERROR
-PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP_CONFIG *pasConfigs,
- IMG_UINT32 ui32NumConfigs)
+#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */
+
+/*************************************************************************/ /*!
+@Function       PVRSRVValidatePhysHeapConfig
+@Description    Sanity-checks the phys heap configs supplied in the device
+                config: at least one config must exist, every config must
+                declare usage flags, no usage flag may appear in more than
+                one config, and a config must back the chosen default heap
+                (GPU_LOCAL or CPU_LOCAL). Warns, but does not fail, when an
+                LMA/DMA default heap is smaller than the recommended minimum.
+@Input          psDevConfig  Device config holding the phys heap array.
+@Return         PVRSRV_OK on success, PVRSRV_ERROR_PHYSHEAP_CONFIG otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ IMG_UINT32 ui32FlagsAccumulate = 0;
+ IMG_UINT32 i;
+
+ PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0,
+ "Device config must specify at least one phys heap config.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+ {
+ PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i];
+
+ PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0,
+ PVRSRV_ERROR_PHYSHEAP_CONFIG,
+ "Phys heap config %d: must specify usage flags.", i);
+
+ /* Each usage flag may be claimed by at most one config; the running
+ * accumulator catches duplicates across the whole array. */
+ PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0,
+ PVRSRV_ERROR_PHYSHEAP_CONFIG,
+ "Phys heap config %d: duplicate usage flags.", i);
+
+ ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags;
+
+ /* Output message if default heap is LMA and smaller than recommended minimum */
+ /* NOTE(review): (1U << eDefaultHeap) assumes PVRSRV_PHYS_HEAP enum
+ * values correspond to PHYS_HEAP_USAGE_* bit positions - confirm
+ * against physheap_config.h. */
+ if (BITMASK_ANY((1U << psDevConfig->eDefaultHeap), PHYS_HEAP_USAGE_MASK) &&
+ BITMASK_ANY((1U << psDevConfig->eDefaultHeap), psHeapConf->ui32UsageFlags) &&
+#if defined(__KERNEL__)
+ ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) ||
+ (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) &&
+#else
+ (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) &&
+#endif
+ (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX
+ " (recommended minimum heap size is 0x%llx)",
+ __func__, psHeapConf->uiSize,
+ PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE));
+ }
+ }
+
+ /* The chosen default heap must actually be backed by one of the configs. */
+ if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL)
+ {
+ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0) ,
+ "Device config must specify GPU local phys heap config.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+ }
+ else if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL)
+ {
+ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_CPU_LOCAL) != 0) ,
+ "Device config must specify CPU local phys heap config.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*************************************************************************/ /*!
+@Function       CreateGpuVirtValArenas
+@Description    Create virtualization validation arenas: one sub-arena per
+                validation OSid carved out of the GPU_LOCAL heap plus a
+                fixed-size shared arena at the top of that heap. The computed
+                per-OSid min/max ranges are handed to the system layer for
+                programming into HW registers.
+@Input          psDeviceNode The device node
+@Return         PVRSRV_ERROR PVRSRV_OK on success
+*/ /**************************************************************************/
+static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
{
+ /* aui64OSidMin and aui64OSidMax are what we program into HW registers.
+ The values are different from base/size of arenas. */
+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+ PHYS_HEAP_CONFIG *psGPULocalHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL);
+ PHYS_HEAP_CONFIG *psDisplayHeap = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY);
+ IMG_UINT64 uBase;
+ IMG_UINT64 uSize;
+ IMG_UINT64 uBaseShared;
+ IMG_UINT64 uSizeShared;
+ IMG_UINT64 uSizeSharedReg;
IMG_UINT32 i;
+
+ /* A GPU_LOCAL heap config is mandatory: it is dereferenced
+ * unconditionally below (the DISPLAY heap, by contrast, is optional
+ * and is NULL-checked). */
+ PVR_LOG_RETURN_IF_FALSE(psGPULocalHeap != NULL,
+ "GPU_LOCAL phys heap config required for GPUVIRT validation.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ /* Shared region is fixed size, the remaining space is divided amongst OSes */
+ uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+ uSize = psGPULocalHeap->uiSize - uSizeShared;
+ uSize /= GPUVIRT_VALIDATION_NUM_OS;
+ uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */
+
+ uBase = psGPULocalHeap->sCardBase.uiAddr;
+ uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS;
+ uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase);
+
+ PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
+ psGPULocalHeap->sCardBase.uiAddr,
+ psGPULocalHeap->uiSize));
+
+ /* If a display heap config exists, include the display heap in the non-secure regions */
+ if (psDisplayHeap)
+ {
+ /* Only works when DISPLAY heap follows GPU_LOCAL heap. */
+ PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
+ psDisplayHeap->sCardBase.uiAddr,
+ psDisplayHeap->uiSize));
+
+ uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize;
+ }
+ else
+ {
+ uSizeSharedReg = uSizeShared;
+ }
+
+ PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE);
+ PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED);
+
+ FOREACH_VALIDATION_OSID(i)
+ {
+ IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH];
+
+ PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize));
+
+ OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i);
+
+ psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName,
+ OSGetPageShift(),
+ 0,
+ uBase,
+ uSize,
+ RA_POLICY_DEFAULT);
+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span");
+
+ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase;
+
+ if (i == 0)
+ {
+ /* OSid0 has access to all regions */
+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL;
+ }
+ else
+ {
+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL;
+ }
+
+ /* uSizeSharedReg includes display heap */
+ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared;
+ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL;
+
+ PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",",
+ i,
+ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i],
+ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i],
+ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i],
+ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i]));
+ uBase += uSize;
+ }
+
+ PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared));
+
+ PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED);
+
+ /* uSizeShared does not include display heap */
+ psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED",
+ OSGetPageShift(),
+ 0,
+ uBaseShared,
+ uSizeShared,
+ RA_POLICY_DEFAULT);
+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span");
+
+ /* Hand the computed HW register ranges to the system layer, if hooked. */
+ if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL)
+ {
+ psDeviceNode->psDevConfig->pfnSysDevVirtInit(psDeviceNode->psDevConfig->hSysData, aui64OSidMin, aui64OSidMax);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Tears down everything built by CreateGpuVirtValArenas.
+ */
+static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT32 uiOSid = 0;
+
+ FOREACH_VALIDATION_OSID(uiOSid)
+ {
+ /*
+ * NOTE: psOSidSubArena[0] is overloaded into psLocalMemArena and gets
+ * cleared later with it, so the host driver's arena is skipped here;
+ * likewise never-created (NULL) arenas are skipped.
+ */
+ if ((uiOSid != RGXFW_HOST_DRIVER_ID) &&
+ (psDeviceNode->psOSidSubArena[uiOSid] != NULL))
+ {
+ RA_Delete(psDeviceNode->psOSidSubArena[uiOSid]);
+ }
+ }
+
+ if (psDeviceNode->psOSSharedArena != NULL)
+ {
+ RA_Delete(psDeviceNode->psOSSharedArena);
+ }
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function PhysHeapMMUPxSetup
+@Description Setup MMU Px allocation function pointers.
+@Input psDeviceNode Pointer to device node struct
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+static PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ PHYS_HEAP_TYPE eHeapType;
PVRSRV_ERROR eError;
- /* Register the physical memory heaps */
- psDevNode->papsRegisteredPhysHeaps =
- OSAllocZMem(sizeof(*psDevNode->papsRegisteredPhysHeaps) * ui32NumConfigs);
- PVR_LOG_RETURN_IF_NOMEM(psDevNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+ eError = PhysHeapAcquireByID(psDeviceNode->psDevConfig->eDefaultHeap,
+ psDeviceNode, &psDeviceNode->psMMUPhysHeap);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit);
- psDevNode->ui32RegisteredPhysHeaps = 0;
+ eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap);
- for (i = 0; i < ui32NumConfigs; i++)
+ if (eHeapType == PHYS_HEAP_TYPE_UMA)
{
- eError = PhysHeapCreateHeapFromConfig(psDevNode,
- pasConfigs + i,
- psDevNode->papsRegisteredPhysHeaps + i);
- PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__));
- psDevNode->ui32RegisteredPhysHeaps++;
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only"
+ " supported on systems with local memory (LMA).", __func__));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ goto ErrorDeinit;
+#endif
}
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__));
-#if defined(SUPPORT_PHYSMEM_TEST)
- /* For a temporary device node there will never be a debug dump
- * request targeting it */
- if (psDevNode->hDebugTable != NULL)
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ eError = CreateGpuVirtValArenas(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit);
#endif
+ }
+
+ return PVRSRV_OK;
+ErrorDeinit:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PhysHeapMMUPxDeInit
+@Description Deinit after PhysHeapMMUPxSetup.
+@Input psDeviceNode Pointer to device node struct
+*/ /**************************************************************************/
+static void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ /* Remove local LMA subarenas */
+ DestroyGpuVirtValArenas(psDeviceNode);
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+ if (psDeviceNode->psMMUPhysHeap != NULL)
{
- eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify,
- psDevNode,
- _PhysHeapDebugRequest,
- DEBUG_REQUEST_SYS,
- psDevNode);
+ PhysHeapRelease(psDeviceNode->psMMUPhysHeap);
+ psDeviceNode->psMMUPhysHeap = NULL;
+ }
+}
- PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify");
+/*************************************************************************/ /*!
+@Function       PhysHeapInitDeviceHeaps
+@Description    Create and acquire the physical heaps for a device: create
+                the heap lock, validate the heap configuration, create a
+                heap per PHYS_HEAP_CONFIG, acquire every heap the PVR layer
+                initialises, verify the configured default heap resolved,
+                then set up the MMU Px heap.
+@Input          psDeviceNode Pointer to device node struct
+@Input          psDevConfig  Device configuration supplying the heap configs
+@Return         PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	PVRSRV_ERROR eError;
+	PVRSRV_PHYS_HEAP ePhysHeap;
+
+	eError = OSLockCreate(&psDeviceNode->hPhysHeapLock);
+	PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
+
+	eError = PVRSRVValidatePhysHeapConfig(psDevConfig);
+	PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig");
+
+	eError = PhysHeapCreateDeviceHeapsFromConfigs(psDeviceNode,
+	                                              psDevConfig->pasPhysHeaps,
+	                                              psDevConfig->ui32PhysHeapCount);
+	PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit);
+
+	/* Acquire (ref-count) each heap the PVR layer is responsible for
+	 * initialising; PhysHeapAcquireByID also caches it in apsPhysHeap[]. */
+	for (ePhysHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++)
+	{
+		if (PhysHeapAcquiredByPVRLayer(ePhysHeap))
+		{
+			eError = PhysHeapAcquireByID(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]);
+			PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorDeinit);
+		}
 	}
+
+	/* Fail initialisation if the configured default heap did not resolve.
+	 * (The previous form tested the success case and passed a stale
+	 * PVRSRV_OK to the macro, so a missing default heap could never be
+	 * reported.) */
+	if (!PhysHeapValidateDefaultHeapExists(psDeviceNode))
+	{
+		eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+		PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapValidateDefaultHeapExists", ErrorDeinit);
+	}
+
+	eError = PhysHeapMMUPxSetup(psDeviceNode);
+	PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapMMUPxSetup", ErrorDeinit);
+
 	return PVRSRV_OK;
+
+ErrorDeinit:
+	PVR_ASSERT(IMG_FALSE);
+	PhysHeapDeInitDeviceHeaps(psDeviceNode);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+@Function       PhysHeapDeInitDeviceHeaps
+@Description    Tear down all physical heap state owned by a device node:
+                MMU Px heap, PVR-layer acquired heaps, per-driver firmware
+                pre-map heaps, the device heap list and the heap lock.
+                Counterpart to PhysHeapInitDeviceHeaps.
+@Input          psDeviceNode Pointer to device node struct
+*/ /**************************************************************************/
+void PhysHeapDeInitDeviceHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_PHYS_HEAP ePhysHeapIdx;
+	IMG_UINT32 i;
+
+	/* Drop the MMU Px heap reference (and GPUVIRT arenas when built in). */
+	PhysHeapMMUPxDeInit(psDeviceNode);
+
+	/* Release heaps */
+	/* NOTE(review): released apsPhysHeap[] entries are not set to NULL here;
+	 * presumably the device node is not reused after deinit - confirm. */
+	for (ePhysHeapIdx = PVRSRV_PHYS_HEAP_DEFAULT;
+	     ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+	     ePhysHeapIdx++)
+	{
+		if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+		{
+			PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+		}
+	}
+
+	/* Firmware pre-map heaps exist per supported driver/OSID and are
+	 * destroyed outright (not just released) here. */
+	FOREACH_SUPPORTED_DRIVER(i)
+	{
+		if (psDeviceNode->apsFWPremapPhysHeap[i])
+		{
+			PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]);
+			psDeviceNode->apsFWPremapPhysHeap[i] = NULL;
+		}
+	}
+
+	/* Destroy every remaining heap on the device's heap list. */
+	PhysHeapDestroyDeviceHeaps(psDeviceNode);
+
+	OSLockDestroy(psDeviceNode->hPhysHeapLock);
 }
PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode,
PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP_POLICY uiPolicy,
PHEAP_IMPL_DATA pvImplData,
PHEAP_IMPL_FUNCS *psImplFuncs,
PHYS_HEAP **ppsPhysHeap)
PVR_RETURN_IF_NOMEM(psNew);
psNew->psDevNode = psDevNode;
psNew->eType = psConfig->eType;
+ psNew->uiPolicy = uiPolicy;
psNew->psMemFuncs = psConfig->psMemFuncs;
psNew->hPrivData = psConfig->hPrivData;
psNew->ui32RefCount = 0;
psNew->pvImplData = pvImplData;
psNew->psImplFuncs = psImplFuncs;
- psNew->psNext = g_psPhysHeapList;
- g_psPhysHeapList = psNew;
+ if (ppsPhysHeap != NULL)
+ {
+ *ppsPhysHeap = psNew;
+ }
- *ppsPhysHeap = psNew;
+ psNew->psNext = psDevNode->psPhysHeapList;
+ psDevNode->psPhysHeapList = psNew;
- PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+ PVR_DPF_RETURN_RC1(PVRSRV_OK, psNew);
}
void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode)
{
- IMG_UINT32 i;
+ PHYS_HEAP *psNode = psDevNode->psPhysHeapList;
if (psDevNode->hPhysHeapDbgReqNotify)
{
PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hPhysHeapDbgReqNotify);
}
- /* Unregister heaps */
- for (i = 0; i < psDevNode->ui32RegisteredPhysHeaps; i++)
+ while (psNode)
{
- PhysHeapDestroy(psDevNode->papsRegisteredPhysHeaps[i]);
- }
+ PHYS_HEAP *psTmp = psNode;
- OSFreeMem(psDevNode->papsRegisteredPhysHeaps);
+ psNode = psNode->psNext;
+ PhysHeapDestroy(psTmp);
+ }
}
void PhysHeapDestroy(PHYS_HEAP *psPhysHeap)
{
PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+ PPVRSRV_DEVICE_NODE psDevNode = psPhysHeap->psDevNode;
PVR_DPF_ENTERED1(psPhysHeap);
PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
}
- if (g_psPhysHeapList == psPhysHeap)
+ if (psDevNode->psPhysHeapList == psPhysHeap)
{
- g_psPhysHeapList = psPhysHeap->psNext;
+ psDevNode->psPhysHeapList = psPhysHeap->psNext;
}
else
{
- PHYS_HEAP *psTmp = g_psPhysHeapList;
+ PHYS_HEAP *psTmp = psDevNode->psPhysHeapList;
while (psTmp->psNext != psPhysHeap)
{
PVR_DPF_RETURN;
}
-PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap)
-{
- PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap");
-
- psPhysHeap->ui32RefCount++;
-
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag,
- PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP **ppsPhysHeap)
+static void _PhysHeapCountUserModeHeaps(PPVRSRV_DEVICE_NODE psDevNode,
+ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags)
{
- PHYS_HEAP *psNode = g_psPhysHeapList;
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32UsageFlag != 0, "ui32UsageFlag");
- PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
-
- PVR_DPF_ENTERED1(ui32UsageFlag);
+ PVRSRV_PHYS_HEAP eHeap;
- OSLockAcquire(g_hPhysHeapLock);
-
- while (psNode)
+ for (eHeap = PVRSRV_PHYS_HEAP_DEFAULT;
+ eHeap <= PVRSRV_PHYS_HEAP_LAST;
+ eHeap++)
{
- if (psNode->psDevNode != psDevNode)
- {
- psNode = psNode->psNext;
- continue;
- }
- if (BITMASK_ANY(psNode->ui32UsageFlags, ui32UsageFlag))
+ if (BIT_ISSET(ui32UsageFlags, eHeap) &&
+ PhysHeapUserModeAlloc(eHeap))
{
+ psDevNode->ui32UserAllocHeapCount++;
break;
}
- psNode = psNode->psNext;
}
+}
- if (psNode == NULL)
- {
- eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
- }
- else
+PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap)
+{
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap");
+
+ psPhysHeap->ui32RefCount++;
+
+ /* When acquiring a heap for the 1st time, perform a check and
+ * calculate the total number of user accessible physical heaps */
+ if (psPhysHeap->ui32RefCount == 1)
{
- psNode->ui32RefCount++;
- PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
- __func__, psNode, psNode->ui32RefCount);
+ _PhysHeapCountUserModeHeaps(psPhysHeap->psDevNode,
+ psPhysHeap->ui32UsageFlags);
}
- OSLockRelease(g_hPhysHeapLock);
-
- *ppsPhysHeap = psNode;
- PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+ return PVRSRV_OK;
}
static PHYS_HEAP * _PhysHeapFindHeap(PVRSRV_PHYS_HEAP ePhysHeap,
PPVRSRV_DEVICE_NODE psDevNode)
{
- PHYS_HEAP *psPhysHeapNode = g_psPhysHeapList;
+ PHYS_HEAP *psPhysHeapNode = psDevNode->psPhysHeapList;
PVRSRV_PHYS_HEAP eFallback;
if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT)
ePhysHeap = psDevNode->psDevConfig->eDefaultHeap;
}
+ /* first check if Heap has been resolved before */
+ if (psDevNode->apsPhysHeap[ePhysHeap] != NULL)
+ {
+ return psDevNode->apsPhysHeap[ePhysHeap];
+ }
+
while (psPhysHeapNode)
{
- if ((psPhysHeapNode->psDevNode == psDevNode) &&
- BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap))
+ if (BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap))
{
return psPhysHeapNode;
}
}
}
-PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap,
- PPVRSRV_DEVICE_NODE psDevNode,
- PHYS_HEAP **ppsPhysHeap)
+PVRSRV_ERROR PhysHeapAcquireByID(PVRSRV_PHYS_HEAP eDevPhysHeap,
+ PPVRSRV_DEVICE_NODE psDevNode,
+ PHYS_HEAP **ppsPhysHeap)
{
PHYS_HEAP *psPhysHeap;
PVRSRV_ERROR eError = PVRSRV_OK;
- PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap != PVRSRV_PHYS_HEAP_DEFAULT, "eDevPhysHeap");
PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap");
PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
PVR_DPF_ENTERED1(ui32Flags);
- OSLockAcquire(g_hPhysHeapLock);
+ OSLockAcquire(psDevNode->hPhysHeapLock);
psPhysHeap = _PhysHeapFindHeap(eDevPhysHeap, psDevNode);
psPhysHeap->ui32RefCount++;
PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
__func__, psPhysHeap, psPhysHeap->ui32RefCount);
+
+ /* When acquiring a heap for the 1st time, perform a check and
+ * calculate the total number of user accessible physical heaps */
+ if (psPhysHeap->ui32RefCount == 1)
+ {
+ _PhysHeapCountUserModeHeaps(psDevNode, BIT(eDevPhysHeap));
+ }
}
else
{
eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
}
- OSLockRelease(g_hPhysHeapLock);
+ OSLockRelease(psDevNode->hPhysHeapLock);
*ppsPhysHeap = psPhysHeap;
PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
{
PVR_DPF_ENTERED1(psPhysHeap);
- OSLockAcquire(g_hPhysHeapLock);
+ OSLockAcquire(psPhysHeap->psDevNode->hPhysHeapLock);
psPhysHeap->ui32RefCount--;
PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d",
__func__, psPhysHeap, psPhysHeap->ui32RefCount);
- OSLockRelease(g_hPhysHeapLock);
+ OSLockRelease(psPhysHeap->psDevNode->hPhysHeapLock);
PVR_DPF_RETURN;
}
return psPhysHeap->eType;
}
+/* Returns the PHYS_HEAP_POLICY flags this heap was created with
+ * (stored from the uiPolicy argument of PhysHeapCreate). */
+PHYS_HEAP_POLICY PhysHeapGetPolicy(PHYS_HEAP *psPhysHeap)
+{
+	return psPhysHeap->uiPolicy;
+}
+
PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap)
{
return psPhysHeap->ui32UsageFlags;
IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode)
{
- PHYS_HEAP *psDefaultHeap;
- IMG_BOOL bDefaultHeapFound;
- PhysHeapAcquireByUsage(1<<(psDevNode->psDevConfig->eDefaultHeap), psDevNode, &psDefaultHeap);
- if (psDefaultHeap == NULL)
- {
- bDefaultHeapFound = IMG_FALSE;
- }
- else
- {
- PhysHeapRelease(psDefaultHeap);
- bDefaultHeapFound = IMG_TRUE;
- }
- return bDefaultHeapFound;
-}
+ PVRSRV_PHYS_HEAP eDefaultHeap = psDevNode->psDevConfig->eDefaultHeap;
+ return ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] != NULL) &&
+ ((psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_DEFAULT] ==
+ psDevNode->apsPhysHeap[eDefaultHeap])));
+}
/*
* This function will set the psDevPAddr to whatever the system layer
* has set it for the referenced region.
* It will not fail if the psCpuPAddr is invalid.
*/
-PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap,
- IMG_CPU_PHYADDR *psCpuPAddr)
-{
- PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
- PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
-
- if (psImplFuncs->pfnGetCPUPAddr != NULL)
- {
- eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
- psCpuPAddr);
- }
-
- return eResult;
-}
-
-PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
- IMG_UINT64 *puiSize)
-{
- PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
- PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
-
- if (psImplFuncs->pfnGetSize != NULL)
- {
- eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData,
- puiSize);
- }
-
- return eResult;
-}
-
-PVRSRV_ERROR
-PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats)
-{
- IMG_UINT32 i = 0;
- PHYS_HEAP *psPhysHeap;
-
- PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST);
-
- for (i = 0; i < ui32PhysHeapCount; i++)
- {
- if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST)
- {
- return PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
- }
-
- if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
- psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode);
+ if (psImplFuncs->pfnGetCPUPAddr != NULL)
+ {
+ eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData,
+ psCpuPAddr);
+ }
- paPhysHeapMemStats[i].ui32PhysHeapFlags = 0;
+ return eResult;
+}
- if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i])
- && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats)
- {
- psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
- &paPhysHeapMemStats[i].ui64TotalSize,
- &paPhysHeapMemStats[i].ui64FreeSize);
- paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap);
+PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap,
+ IMG_UINT64 *puiSize)
+{
+ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs;
+ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED;
- if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap)
- {
- paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT;
- }
- }
- else
- {
- paPhysHeapMemStats[i].ui64TotalSize = 0;
- paPhysHeapMemStats[i].ui64FreeSize = 0;
- }
+ if (psImplFuncs->pfnGetSize != NULL)
+ {
+ eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData,
+ puiSize);
}
- return PVRSRV_OK;
+ return eResult;
}
PVRSRV_ERROR
-PhysHeapGetMemInfoPkd(PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats)
+PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP *paePhysHeapID,
+ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats)
{
IMG_UINT32 i = 0;
PHYS_HEAP *psPhysHeap;
- PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST);
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode invalid");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32PhysHeapCount <= MAX_USER_MODE_ALLOC_PHYS_HEAPS, "ui32PhysHeapCount invalid");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(paePhysHeapID != NULL, "paePhysHeapID invalid");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(paPhysHeapMemStats != NULL, "paPhysHeapMemStats invalid");
for (i = 0; i < ui32PhysHeapCount; i++)
{
return PVRSRV_OK;
}
-void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, IMG_UINT64 *pui64TotalSize, IMG_UINT64 *pui64FreeSize)
-{
- if (psPhysHeap && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats)
- {
- psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
- pui64TotalSize,
- pui64FreeSize);
- }
- else
- {
- *pui64TotalSize = *pui64FreeSize = 0;
- }
-}
-
void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
IMG_UINT32 ui32NumOfAddr,
IMG_DEV_PHYADDR *psDevPAddr,
PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap,
struct _CONNECTION_DATA_ *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
return psImplFuncs->pfnCreatePMR(psPhysHeap,
psConnection,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
ui32PDumpFlags);
}
-PVRSRV_ERROR PhysHeapInit(void)
-{
- PVRSRV_ERROR eError;
-
- g_psPhysHeapList = NULL;
-
- eError = OSLockCreate(&g_hPhysHeapLock);
- PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
-
- return PVRSRV_OK;
-}
-
-void PhysHeapDeinit(void)
-{
- PVR_ASSERT(g_psPhysHeapList == NULL);
-
- OSLockDestroy(g_hPhysHeapLock);
-}
-
PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap)
{
PVR_ASSERT(psPhysHeap != NULL);
return psPhysHeap->psDevNode;
}
-IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap)
+static IMG_BOOL PhysHeapCreatedByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap)
{
PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST);
- return gasHeapProperties[ePhysHeap].bPVRLayerAcquire;
+ return (gasHeapProperties[ePhysHeap].ePVRLayerAction != PVR_LAYER_HEAP_ACTION_IGNORE);
}
-IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap)
+static IMG_BOOL PhysHeapAcquiredByPVRLayer(PVRSRV_PHYS_HEAP ePhysHeap)
{
PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST);
- return gasHeapProperties[ePhysHeap].bUserModeAlloc;
-}
-
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
-/*************************************************************************/ /*!
-@Function CreateGpuVirtValArenas
-@Description Create virtualization validation arenas
-@Input psDeviceNode The device node
-@Return PVRSRV_ERROR PVRSRV_OK on success
-*/ /**************************************************************************/
-static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
-{
- /* aui64OSidMin and aui64OSidMax are what we program into HW registers.
- The values are different from base/size of arenas. */
- IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
- IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
- PHYS_HEAP_CONFIG *psGPULocalHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL);
- PHYS_HEAP_CONFIG *psDisplayHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY);
- IMG_UINT64 uBase;
- IMG_UINT64 uSize;
- IMG_UINT64 uBaseShared;
- IMG_UINT64 uSizeShared;
- IMG_UINT64 uSizeSharedReg;
- IMG_UINT32 i;
-
- /* Shared region is fixed size, the remaining space is divided amongst OSes */
- uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
- uSize = psGPULocalHeap->uiSize - uSizeShared;
- uSize /= GPUVIRT_VALIDATION_NUM_OS;
- uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */
-
- uBase = psGPULocalHeap->sCardBase.uiAddr;
- uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS;
- uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase);
-
- PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
- psGPULocalHeap->sCardBase.uiAddr,
- psGPULocalHeap->uiSize));
-
- /* If a display heap config exists, include the display heap in the non-secure regions */
- if (psDisplayHeap)
- {
- /* Only works when DISPLAY heap follows GPU_LOCAL heap. */
- PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".",
- psDisplayHeap->sCardBase.uiAddr,
- psDisplayHeap->uiSize));
-
- uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize;
- }
- else
- {
- uSizeSharedReg = uSizeShared;
- }
-
- PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE);
- PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED);
-
- for (i = 0; i < GPUVIRT_VALIDATION_NUM_OS; i++)
- {
- IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH];
-
- PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize));
-
- OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i);
-
- psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName,
- OSGetPageShift(),
- 0,
- uBase,
- uSize);
- PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span");
-
- aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase;
-
- if (i == 0)
- {
- /* OSid0 has access to all regions */
- aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL;
- }
- else
- {
- aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL;
- }
-
- /* uSizeSharedReg includes display heap */
- aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared;
- aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL;
-
- PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",",
- i,
- aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i],
- aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i],
- aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i],
- aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i]));
- uBase += uSize;
- }
-
- PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared));
-
- PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED);
-
- /* uSizeShared does not include display heap */
- psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED",
- OSGetPageShift(),
- 0,
- uBaseShared,
- uSizeShared);
- PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span");
-
- if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL)
- {
- psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui64OSidMin, aui64OSidMax);
- }
-
- return PVRSRV_OK;
-}
-
-/*
- * Counter-part to CreateGpuVirtValArenas.
- */
-static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
-{
- IMG_UINT32 uiCounter = 0;
-
- /*
- * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must
- * not free it here as it gets cleared later.
- */
- for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
- {
- if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
- {
- continue;
- }
- RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
- }
-
- if (psDeviceNode->psOSSharedArena != NULL)
- {
- RA_Delete(psDeviceNode->psOSSharedArena);
- }
-}
-#endif
-
-PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode)
-{
- PHYS_HEAP_TYPE eHeapType;
- PVRSRV_ERROR eError;
-
- eError = PhysHeapAcquireByDevPhysHeap(psDeviceNode->psDevConfig->eDefaultHeap,
- psDeviceNode, &psDeviceNode->psMMUPhysHeap);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit);
-
- eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap);
-
- if (eHeapType == PHYS_HEAP_TYPE_UMA)
- {
- PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__));
-
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
- PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only"
- " supported on systems with local memory (LMA).", __func__));
- eError = PVRSRV_ERROR_NOT_SUPPORTED;
- goto ErrorDeinit;
-#endif
- }
- else
- {
- PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__));
-
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
- eError = CreateGpuVirtValArenas(psDeviceNode);
- PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit);
-#endif
- }
-
- return PVRSRV_OK;
-ErrorDeinit:
- return eError;
+ return (gasHeapProperties[ePhysHeap].ePVRLayerAction == PVR_LAYER_HEAP_ACTION_INITIALISE);
}
-void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode)
+IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap)
{
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
- /* Remove local LMA subarenas */
- DestroyGpuVirtValArenas(psDeviceNode);
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST);
- if (psDeviceNode->psMMUPhysHeap != NULL)
- {
- PhysHeapRelease(psDeviceNode->psMMUPhysHeap);
- psDeviceNode->psMMUPhysHeap = NULL;
- }
+ return gasHeapProperties[ePhysHeap].bUserModeAlloc;
}
#if defined(SUPPORT_GPUVIRT_VALIDATION)
return ui32PageShift;
}
+
+/*************************************************************************/ /*!
+@Function       PhysHeapFreeMemCheck
+@Description    Check that a physical heap reports at least
+                ui64MinRequiredMem bytes of free memory via its PMR factory
+                statistics hook.
+@Input          psPhysHeap         Heap to query.
+@Input          ui64MinRequiredMem Minimum free bytes required.
+@Output         pui64FreeMem       Free bytes reported by the heap.
+@Return         PVRSRV_OK when enough memory is free,
+                PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY otherwise,
+                PVRSRV_ERROR_NOT_IMPLEMENTED if the heap implementation
+                exposes no PMR factory statistics.
+*/ /**************************************************************************/
+PVRSRV_ERROR PhysHeapFreeMemCheck(PHYS_HEAP *psPhysHeap,
+                                  IMG_UINT64 ui64MinRequiredMem,
+                                  IMG_UINT64 *pui64FreeMem)
+{
+	IMG_UINT64 ui64TotalSize;
+	IMG_UINT64 ui64FreeSize;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap");
+	PVR_LOG_RETURN_IF_INVALID_PARAM(pui64FreeMem != NULL, "pui64FreeMem");
+
+	/* pfnGetPMRFactoryMemStats is an optional implementation hook; other
+	 * callers in this file NULL-check it before use, so do the same here
+	 * rather than dereferencing a NULL function pointer. */
+	if (psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats == NULL)
+	{
+		return PVRSRV_ERROR_NOT_IMPLEMENTED;
+	}
+
+	psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData,
+	                                                  &ui64TotalSize,
+	                                                  &ui64FreeSize);
+
+	*pui64FreeMem = ui64FreeSize;
+	/* ">=" deliberately treats an exact match as insufficient, leaving
+	 * headroom above the stated minimum - TODO confirm against callers. */
+	if (ui64MinRequiredMem >= *pui64FreeMem)
+	{
+		eError = PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY;
+	}
+
+	return eError;
+}
#include "physmem_osmem.h"
#if defined(DEBUG)
-static IMG_UINT32 gPMRAllocFail;
+static IMG_UINT32 PMRAllocFail;
#if defined(__linux__)
#include <linux/moduleparam.h>
-module_param(gPMRAllocFail, uint, 0644);
-MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches "
+module_param(PMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(PMRAllocFail, "When number of PMR allocs reaches "
"this value, it will fail (default value is 0 which "
"means that alloc function will behave normally).");
#endif /* defined(__linux__) */
const IMG_CHAR *pszSymbolicAddress,
IMG_HANDLE *phHandlePtr,
#endif
+ IMG_PID uiPid,
IMG_HANDLE hMemHandle,
IMG_DEV_PHYADDR *psDevPhysAddr)
{
PG_HANDLE *psMemHandle;
IMG_UINT64 uiMask;
IMG_DEV_PHYADDR sDevPhysAddr_int;
- IMG_PID uiPid = 0;
psMemHandle = hMemHandle;
-#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
- PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
-#endif
-
/* Allocate the pages */
eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap,
TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
IMG_UINT32 ui32NumVirtChunks,
PVRSRV_MEMALLOCFLAGS_T uiFlags,
IMG_UINT32 *puiLog2AllocPageSize,
- IMG_DEVMEM_SIZE_T *puiSize,
- PMR_SIZE_T *puiChunkSize)
+ IMG_DEVMEM_SIZE_T *puiSize)
{
IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
IMG_DEVMEM_SIZE_T uiSize = *puiSize;
- PMR_SIZE_T uiChunkSize = *puiChunkSize;
/* Sparse if we have different number of virtual and physical chunks plus
* in general all allocations with more than one virtual chunk */
IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
+ if (PVRSRV_CHECK_ON_DEMAND(uiFlags) &&
+ PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid to specify both ON_DEMAND and NOW phys alloc flags: 0x%" IMG_UINT64_FMTSPECX, __func__, uiFlags));
+ return PVRSRV_ERROR_INVALID_FLAGS;
+ }
+
if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0)
{
PVR_DPF((PVR_DBG_ERROR,
}
/* Protect against ridiculous page sizes */
- if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+ if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT || uiLog2AllocPageSize < RGX_HEAP_4KB_PAGE_SHIFT)
{
- PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
+ PVR_DPF((PVR_DBG_ERROR, "Page size is out of range: 2^%u.", uiLog2AllocPageSize));
return PVRSRV_ERROR_INVALID_PARAMS;
}
/* Range check of the alloc size */
- if (uiSize >= 0x1000000000ULL)
+ if (!PMRValidateSize(uiSize))
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Cancelling allocation request of over 64 GB. "
- "This is likely a bug."
- , __func__));
- return PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOG_VA(PVR_DBG_ERROR,
+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"",
+ ui32NumVirtChunks,
+ (IMG_UINT64) 1ULL << uiLog2AllocPageSize);
+ return PVRSRV_ERROR_PMR_TOO_LARGE;
}
/* Fail if requesting coherency on one side but uncached on the other */
return PVRSRV_ERROR_INVALID_PARAMS;
}
- /* ... chunk size must be a equal to page size ... */
- if (uiChunkSize != (1 << uiLog2AllocPageSize))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Invalid chunk size for sparse allocation. Requested "
- "%#" IMG_UINT64_FMTSPECx ", must be same as page size %#x.",
- __func__, uiChunkSize, 1 << uiLog2AllocPageSize));
-
- return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
- }
-
- if (ui32NumVirtChunks * uiChunkSize != uiSize)
+ if (ui32NumVirtChunks * (1 << uiLog2AllocPageSize) != uiSize)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") "
"is not equal to virtual chunks * chunk size "
"(%#" IMG_UINT64_FMTSPECx ")",
- __func__, uiSize, ui32NumVirtChunks * uiChunkSize));
+ __func__, uiSize, (IMG_UINT64) (ui32NumVirtChunks * (1ULL << uiLog2AllocPageSize))));
return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
}
/* Same for total size */
uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
- *puiChunkSize = uiSize;
}
if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
case PVRSRV_PHYS_HEAP_FW_PREMAP7:
{
/* keep heap (with check) */
- PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST));
+ PVR_RETURN_IF_INVALID_PARAM(!PVRSRV_VZ_MODE_IS(GUEST));
break;
}
case PVRSRV_PHYS_HEAP_LAST:
PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDevNode,
IMG_DEVMEM_SIZE_T uiSize,
- PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
{
PVRSRV_ERROR eError;
+ IMG_UINT32 i;
PVRSRV_PHYS_HEAP ePhysHeapIdx;
PVRSRV_MEMALLOCFLAGS_T uiPMRFlags = uiFlags;
- PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
- psDevNode->psDevConfig->pfnCheckMemAllocSize;
-
+ uiPid = (psConnection != NULL) ? OSGetCurrentClientProcessIDKM() : uiPid;
+
+	/* This is where we would expect to validate the uiAnnotationLength parameter
+	   (to confirm it is sufficient to store the string in pszAnnotation plus a
+	   terminating NULL). However, we do not make reference to this value when
+	   we copy the string in PMRCreatePMR() - instead there we use strlcpy()
+	   to copy at most the size of the destination buffer in chars and ensure
+	   whatever is copied is null-terminated.
+	   The parameter is only used by the generated bridge code.
+	*/
PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
eError = _ValidateParams(ui32NumPhysChunks,
ui32NumVirtChunks,
uiFlags,
&uiLog2AllocPageSize,
- &uiSize,
- &uiChunkSize);
+ &uiSize);
PVR_RETURN_IF_ERROR(eError);
+#if defined(PDUMP)
+ eError = PDumpValidateUMFlags(ui32PDumpFlags);
+ PVR_RETURN_IF_ERROR(eError);
+#endif
+
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+ PVR_LOG_RETURN_IF_FALSE(pui32MappingTable[i] < ui32NumVirtChunks,
+ "Mapping table value exceeds ui32NumVirtChunks",
+ PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx);
PVR_RETURN_IF_ERROR(eError);
return PVRSRV_ERROR_INVALID_HEAP;
}
- /* Apply memory budgeting policy */
- if (pfnCheckMemAllocSize)
- {
- IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
-
- eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
- PVR_RETURN_IF_ERROR(eError);
- }
-
#if defined(DEBUG)
- if (gPMRAllocFail > 0)
+ if (PMRAllocFail > 0)
{
static IMG_UINT32 ui32AllocCount = 1;
- if (ui32AllocCount < gPMRAllocFail)
+ if (ui32AllocCount < PMRAllocFail)
{
ui32AllocCount++;
}
* should be attributed to the driver (PID 1) rather than to the
* process those allocations are made under. Same applies to the memory
* allocated for the Firmware. */
- if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+ if (psDevNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ||
PVRSRV_CHECK_FW_MAIN(uiFlags))
{
uiPid = PVR_SYS_ALLOC_PID;
eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx],
psConnection,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
if (eError != PVRSRV_OK)
{
- PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
+ PVRSRVStatsUpdateOOMStat(psConnection,
+ psDevNode,
+ PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT,
OSGetCurrentClientProcessIDKM());
}
#endif
PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDevNode,
IMG_DEVMEM_SIZE_T uiSize,
- PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
{
PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags);
+ PVRSRV_ERROR eError;
PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength");
PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation");
return PVRSRV_ERROR_INVALID_PARAMS;
}
- return PhysmemNewRamBackedPMR_direct(psConnection,
- psDevNode,
- uiSize,
- uiChunkSize,
- ui32NumPhysChunks,
- ui32NumVirtChunks,
- pui32MappingTable,
- uiLog2AllocPageSize,
- uiFlags,
- uiAnnotationLength,
- pszAnnotation,
- uiPid,
- ppsPMRPtr,
- ui32PDumpFlags,
- puiPMRFlags);
-}
-
-PVRSRV_ERROR
-PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_DEVMEM_SIZE_T uiSize,
- PMR_SIZE_T uiChunkSize,
- IMG_UINT32 ui32NumPhysChunks,
- IMG_UINT32 ui32NumVirtChunks,
- IMG_UINT32 *pui32MappingTable,
- IMG_UINT32 uiLog2PageSize,
- PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_UINT32 uiAnnotationLength,
- const IMG_CHAR *pszAnnotation,
- IMG_PID uiPid,
- PMR **ppsPMRPtr,
- IMG_UINT32 ui32PDumpFlags,
- PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
-{
-
- PVRSRV_ERROR eError;
- eError = PhysmemNewRamBackedPMR(psConnection,
- psDevNode,
- uiSize,
- uiChunkSize,
- ui32NumPhysChunks,
- ui32NumVirtChunks,
- pui32MappingTable,
- uiLog2PageSize,
- uiFlags,
- uiAnnotationLength,
- pszAnnotation,
- uiPid,
- ppsPMRPtr,
- ui32PDumpFlags,
- puiPMRFlags);
-
+ eError = PhysmemNewRamBackedPMR_direct(psConnection,
+ psDevNode,
+ uiSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2AllocPageSize,
+ uiFlags,
+ uiAnnotationLength,
+ pszAnnotation,
+ uiPid,
+ ppsPMRPtr,
+ ui32PDumpFlags,
+ puiPMRFlags);
if (eError == PVRSRV_OK)
{
- eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+ /* Lock phys addresses if backing was allocated */
+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(uiFlags))
+ {
+ eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+ }
}
return eError;
}
-PVRSRV_ERROR
-PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 *pui32PhysHeapCount)
-{
- PVR_UNREFERENCED_PARAMETER(psConnection);
- PVRSRVGetDevicePhysHeapCount(psDevNode, pui32PhysHeapCount);
- return PVRSRV_OK;
-}
-
PVRSRV_ERROR
PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_PHYS_HEAP *peHeap)
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_PHYS_HEAP *peHeap)
{
PVR_UNREFERENCED_PARAMETER(psConnection);
*peHeap = psDevNode->psDevConfig->eDefaultHeap;
return PVRSRV_OK;
}
-PVRSRV_ERROR
-PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS *apPhysHeapMemStats)
-{
- PHYS_HEAP *psPhysHeap;
- IMG_UINT uiHeapIndex, i = 0;
-
- PVR_UNREFERENCED_PARAMETER(psConnection);
-
- if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++)
- {
- psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex];
-
- if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex))
- {
- PVR_ASSERT(i < ui32PhysHeapCount);
-
- PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize,
- &apPhysHeapMemStats[i].ui64FreeSize);
-
- i++;
- }
- }
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR
-PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats)
-{
- PHYS_HEAP *psPhysHeap;
- IMG_UINT uiHeapIndex, i = 0;
-
- PVR_UNREFERENCED_PARAMETER(psConnection);
-
- if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++)
- {
- psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex];
-
- if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex))
- {
- PVR_ASSERT(i < ui32PhysHeapCount);
-
- PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize,
- &apPhysHeapMemStats[i].ui64FreeSize);
-
- i++;
- }
- }
- return PVRSRV_OK;
-}
-
PVRSRV_ERROR
PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS *paPhysHeapMemStats)
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP *paePhysHeapID,
+ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats)
{
PVR_UNREFERENCED_PARAMETER(psConnection);
return PhysHeapGetMemInfo(psDevNode,
- ui32PhysHeapCount,
- paePhysHeapID,
- paPhysHeapMemStats);
-}
-
-PVRSRV_ERROR
-PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats)
-{
- PVR_UNREFERENCED_PARAMETER(psConnection);
- return PhysHeapGetMemInfoPkd(psDevNode,
- ui32PhysHeapCount,
- paePhysHeapID,
- paPhysHeapMemStats);
+ ui32PhysHeapCount,
+ paePhysHeapID,
+ paPhysHeapMemStats);
}
/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Implementation of PMR Mapping History for OS managed memory
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This module allows for an OS independent method of capture of
+ mapping history for OS memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "img_defs.h"
+#include <powervr/mem_types.h>
+#include "di_common.h"
+#include "di_server.h"
+#include "pvr_notifier.h"
+#include "physmem_cpumap_history.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#define MAPPING_HISTORY_CB_NUM_ENTRIES 10000
+#if defined(DEBUG)
+#define MAX_MAPPING_ANNOT_STR PVR_ANNOTATION_MAX_LEN
+#else
+#define MAX_MAPPING_ANNOT_STR 32
+#endif
+
+typedef struct _MAPPING_RECORD_ MAPPING_RECORD; /* Forward declaration */
+typedef void (*PFN_MAPPING_RECORD_STRING)(MAPPING_RECORD *psRecord,
+ IMG_CHAR (*pszBuffer)[PVR_MAX_DEBUG_MESSAGE_LEN]);
+
+typedef struct _MAP_DATA_
+{
+ IMG_CHAR aszAnnotation[MAX_MAPPING_ANNOT_STR];
+ IMG_PID uiPID;
+ IMG_CPU_VIRTADDR pvAddress;
+ IMG_CPU_PHYADDR sCpuPhyAddr;
+ IMG_UINT32 ui32CPUCacheFlags;
+ size_t uiMapOffset; /* Mapping offset used when we don't map the whole PMR */
+ IMG_UINT32 ui32PageCount; /* # pages mapped */
+} MAP_DATA;
+
+typedef struct _UNMAP_DATA_
+{
+ IMG_CPU_VIRTADDR pvAddress;
+ IMG_CPU_PHYADDR sCpuPhyAddr;
+ IMG_UINT32 ui32CPUCacheFlags;
+ IMG_UINT32 ui32PageCount;
+} UNMAP_DATA;
+
+struct _MAPPING_RECORD_
+{
+ enum MAPPING_OP
+ {
+ UNSET = 0,
+ MAP,
+ UNMAP,
+ } etype;
+ union
+ {
+ MAP_DATA sMapData;
+ UNMAP_DATA sUnmapData;
+ } u;
+ PFN_MAPPING_RECORD_STRING pfnRecordString;
+};
+
+typedef struct _RECORDS_
+{
+ /* CB of mapping records that will be overwritten by newer entries */
+ MAPPING_RECORD *pasMapRecordsCB;
+ /* Current head of CB, used to get next slot, newest record +1 */
+ IMG_UINT32 ui32Head;
+ /* Current tail of CB, oldest record */
+ IMG_UINT32 ui32Tail;
+ /* Have we overwritten any records */
+ IMG_BOOL bOverwrite;
+} RECORDS;
+
+typedef struct _PHYSMEM_CPUMAP_HISTORY_DATA_
+{
+ DI_ENTRY *psDIEntry;
+ RECORDS sRecords;
+ POS_LOCK hLock;
+ IMG_HANDLE hDbgNotifier;
+
+} PHYSMEM_CPUMAP_HISTORY_DATA;
+
+typedef struct _MAPPING_HISTORY_ITERATOR_
+{
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32Finish;
+ IMG_UINT32 ui32Current;
+} MAPPING_HISTORY_ITERATOR;
+
+static PHYSMEM_CPUMAP_HISTORY_DATA gsMappingHistoryData;
+
+/* Allocate the backing store for the mapping-history circular buffer (CB):
+ * MAPPING_HISTORY_CB_NUM_ENTRIES records. The memory is not zeroed here;
+ * head/tail/overwrite state is set up separately by InitMappingRecordCB().
+ * Returns PVRSRV_ERROR_OUT_OF_MEMORY on allocation failure.
+ */
+static PVRSRV_ERROR CreateMappingRecords(void)
+{
+ gsMappingHistoryData.sRecords.pasMapRecordsCB =
+ OSAllocMem(sizeof(MAPPING_RECORD) * MAPPING_HISTORY_CB_NUM_ENTRIES);
+ PVR_RETURN_IF_NOMEM(gsMappingHistoryData.sRecords.pasMapRecordsCB);
+
+ return PVRSRV_OK;
+}
+
+/* Reset the CB bookkeeping to the empty state (head == tail, nothing
+ * overwritten yet). Does not touch the record storage itself. */
+static void InitMappingRecordCB(void)
+{
+ gsMappingHistoryData.sRecords.ui32Head = 0;
+ gsMappingHistoryData.sRecords.ui32Tail = 0;
+ gsMappingHistoryData.sRecords.bOverwrite = IMG_FALSE;
+}
+
+/* Free the CB storage allocated by CreateMappingRecords(). Caller is
+ * responsible for NULLing the pointer afterwards (see CPUMappingHistoryDeInit). */
+static void DestroyMappingRecords(void)
+{
+ OSFreeMem(gsMappingHistoryData.sRecords.pasMapRecordsCB);
+}
+
+/* Acquire the module-wide history lock. The NULL check makes this a no-op
+ * before CPUMappingHistoryInit() has created the lock or after
+ * CPUMappingHistoryDeInit() has destroyed it. */
+static void MappingHistoryLock(void)
+{
+ if (gsMappingHistoryData.hLock)
+ {
+ OSLockAcquire(gsMappingHistoryData.hLock);
+ }
+}
+
+/* Release the module-wide history lock; no-op when the lock doesn't exist
+ * (mirrors MappingHistoryLock). */
+static void MappingHistoryUnlock(void)
+{
+ if (gsMappingHistoryData.hLock)
+ {
+ OSLockRelease(gsMappingHistoryData.hLock);
+ }
+}
+
+/* Snapshot the CB bounds into an iterator: iteration starts at the tail
+ * (oldest record) and finishes when it reaches the head (one past the
+ * newest). NOTE(review): the snapshot itself is not taken under the history
+ * lock here — callers appear responsible for serialisation; confirm at call
+ * sites (the DI path locks, the debug-dump path does not). */
+static MAPPING_HISTORY_ITERATOR CreateMappingHistoryIterator(void)
+{
+ MAPPING_HISTORY_ITERATOR sIter;
+
+ sIter.ui32Start = gsMappingHistoryData.sRecords.ui32Tail;
+ sIter.ui32Current = gsMappingHistoryData.sRecords.ui32Tail;
+ sIter.ui32Finish = gsMappingHistoryData.sRecords.ui32Head;
+
+ return sIter;
+}
+
+/* The CB is empty only when head == tail AND we have never wrapped;
+ * head == tail with bOverwrite set means the buffer is full, not empty. */
+static IMG_BOOL MappingHistoryHasRecords(void)
+{
+ if ((gsMappingHistoryData.sRecords.ui32Head ==
+ gsMappingHistoryData.sRecords.ui32Tail) &&
+ !gsMappingHistoryData.sRecords.bOverwrite)
+ {
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+/* Advance the iterator one slot (with wrap-around). Returns IMG_FALSE once
+ * the finish position is reached, i.e. all records have been visited.
+ * Intended for use as the condition of a do/while loop, so the record at the
+ * start position is consumed before the first call. */
+static IMG_BOOL MappingHistoryIteratorNext(MAPPING_HISTORY_ITERATOR *psIter)
+{
+ psIter->ui32Current = (psIter->ui32Current + 1) % MAPPING_HISTORY_CB_NUM_ENTRIES;
+
+ if (psIter->ui32Current == psIter->ui32Finish)
+ {
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+/* Convert an iterator position to a pointer at the corresponding CB record.
+ * Note the argument is passed by value (struct, not pointer) despite the
+ * psIter naming. */
+#define MAPPING_ITER_TO_PTR(psIter) (&(gsMappingHistoryData.sRecords.pasMapRecordsCB[psIter.ui32Current]))
+
+/* Claim the next CB slot (at the current head) and advance the head with
+ * wrap-around. Once the buffer has wrapped (bOverwrite), the tail is dragged
+ * along with the head so the tail always points at the oldest surviving
+ * record. The returned slot's previous contents are overwritten by the
+ * caller. NOTE(review): performs no locking itself — callers are presumed to
+ * hold the history lock; confirm for InsertMappingRecord/InsertUnMappingRecord. */
+static MAPPING_RECORD* AcquireMappingHistoryCBSlot(void)
+{
+ MAPPING_RECORD *psSlot;
+
+ psSlot = &gsMappingHistoryData.sRecords.pasMapRecordsCB[gsMappingHistoryData.sRecords.ui32Head];
+
+ gsMappingHistoryData.sRecords.ui32Head =
+ (gsMappingHistoryData.sRecords.ui32Head + 1)
+ % MAPPING_HISTORY_CB_NUM_ENTRIES;
+
+ if (!gsMappingHistoryData.sRecords.bOverwrite)
+ {
+ if (gsMappingHistoryData.sRecords.ui32Head == gsMappingHistoryData.sRecords.ui32Tail)
+ {
+ gsMappingHistoryData.sRecords.bOverwrite = IMG_TRUE;
+ }
+ }
+ else
+ {
+ gsMappingHistoryData.sRecords.ui32Tail = gsMappingHistoryData.sRecords.ui32Head;
+ }
+
+ return psSlot;
+}
+
+/* Map a CPU cache-mode flag to a fixed two-character tag for display:
+ * UC = uncached, WC = write-combined, CA = cached, UN = unrecognised value.
+ * Returns a pointer to a string literal — callers must not modify or free it. */
+static IMG_CHAR* CacheFlagsToStr(IMG_UINT32 ui32CPUCacheFlags)
+{
+ switch (ui32CPUCacheFlags)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ return "UC";
+
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
+ return "WC";
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ return "CA";
+
+ default:
+ return "UN";
+ }
+}
+
+/* Format a MAP record into the caller's buffer; column widths are chosen to
+ * line up under the header emitted by MappingHistoryGetHeaderString().
+ * Installed as pfnRecordString for MAP records. */
+static void MapRecordString(MAPPING_RECORD *psRecord,
+ IMG_CHAR (*pszBuffer)[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(*pszBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ "%-32s %-8u 0x%p "CPUPHYADDR_UINT_FMTSPEC" %-5u (%-2s) %-12lu %-8u",
+ psRecord->u.sMapData.aszAnnotation,
+ psRecord->u.sMapData.uiPID,
+ psRecord->u.sMapData.pvAddress,
+ CPUPHYADDR_FMTARG(psRecord->u.sMapData.sCpuPhyAddr.uiAddr),
+ psRecord->u.sMapData.ui32CPUCacheFlags,
+ CacheFlagsToStr(psRecord->u.sMapData.ui32CPUCacheFlags),
+ (unsigned long) psRecord->u.sMapData.uiMapOffset,
+ psRecord->u.sMapData.ui32PageCount);
+}
+
+/* Format an UNMAP record into the caller's buffer. "UnMap" and "" stand in
+ * for the annotation/PID and offset columns (which only MAP records have)
+ * so rows stay aligned. Installed as pfnRecordString for UNMAP records. */
+static void UnMapRecordString(MAPPING_RECORD *psRecord,
+ IMG_CHAR (*pszBuffer)[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(*pszBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ "%-41s 0x%p "CPUPHYADDR_UINT_FMTSPEC" %-5u (%-2s) %-12s %-8u",
+ "UnMap", /* PADDING */
+ psRecord->u.sUnmapData.pvAddress,
+ CPUPHYADDR_FMTARG(psRecord->u.sUnmapData.sCpuPhyAddr.uiAddr),
+ psRecord->u.sUnmapData.ui32CPUCacheFlags,
+ CacheFlagsToStr(psRecord->u.sUnmapData.ui32CPUCacheFlags),
+ "", /* PADDING */
+ psRecord->u.sUnmapData.ui32PageCount);
+}
+
+/* Format the column-header row matching the record formatters above. */
+static void MappingHistoryGetHeaderString(IMG_CHAR (*pszBuffer)[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(*pszBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+ "%-32s %-8s %-18s %-18s %-9s %-8s %-8s",
+ "PMRAnnotation",
+ "PID",
+ "CpuVirtAddr",
+ "CpuPhyAddr",
+ "CacheFlags",
+ "PMRMapOffset",
+ "ui32PageCount");
+}
+
+/* Emit every record in the CB, oldest first, to a DI (DebugFS-style) entry.
+ * Uses each record's pfnRecordString virtual formatter; the do/while shape
+ * matches MappingHistoryIteratorNext's post-increment semantics.
+ * Caller must hold the history lock (see MappingHistoryPrintAllWrapper). */
+static void MappingHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ MAPPING_HISTORY_ITERATOR sIter = CreateMappingHistoryIterator();
+
+ if (!MappingHistoryHasRecords())
+ {
+ DIPrintf(psEntry, "No Records...\n");
+ return;
+ }
+
+ MappingHistoryGetHeaderString(&szBuffer);
+ DIPrintf(psEntry, "%s\n", szBuffer);
+
+ do
+ {
+ MAPPING_RECORD *psRecord = MAPPING_ITER_TO_PTR(sIter);
+ psRecord->pfnRecordString(psRecord, &szBuffer);
+ DIPrintf(psEntry, "%s\n", szBuffer);
+
+ }
+ while (MappingHistoryIteratorNext(&sIter));
+}
+
+/* DI pfnShow callback: serialise access to the CB around the full dump.
+ * Always returns 0 (success) as required by the DI iterator contract. */
+static int MappingHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ MappingHistoryLock();
+ MappingHistoryPrintAll(psEntry);
+ MappingHistoryUnlock();
+
+ return 0;
+}
+
+/* Debug-request notifier callback: dump the whole mapping history via the
+ * supplied printf, but only at maximum verbosity.
+ * NOTE(review): unlike the DI path, this reads the CB WITHOUT taking the
+ * history lock — presumably because a debug dump may run in a context where
+ * blocking on the lock is unsafe; confirm this is intentional, as a
+ * concurrent insert could tear the iteration. */
+static void MappingHistoryDebugDump(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MAX))
+ {
+ MAPPING_HISTORY_ITERATOR sIter = CreateMappingHistoryIterator();
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+
+ PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+ PVR_DUMPDEBUG_LOG("------[ Physmem CPU Map History ]------");
+
+ if (!MappingHistoryHasRecords())
+ {
+ PVR_DUMPDEBUG_LOG("No Records...");
+ return;
+ }
+
+ MappingHistoryGetHeaderString(&szBuffer);
+ PVR_DUMPDEBUG_LOG("%s", szBuffer);
+
+ do
+ {
+ MAPPING_RECORD *psRecord = MAPPING_ITER_TO_PTR(sIter);
+ psRecord->pfnRecordString(psRecord, &szBuffer);
+ PVR_DUMPDEBUG_LOG("%s", szBuffer);
+ }
+ while (MappingHistoryIteratorNext(&sIter));
+ }
+}
+
+/* One-time module init: create the lock, allocate and reset the record CB,
+ * publish the "cpumap_history" DI entry, and register the debug-dump
+ * notifier. On any failure, unwinds via CPUMappingHistoryDeInit(), which
+ * tolerates partial initialisation (gsMappingHistoryData is static and so
+ * zero-initialised) and returns the first error. */
+PVRSRV_ERROR CPUMappingHistoryInit(void)
+{
+ PVRSRV_ERROR eError;
+ DI_ITERATOR_CB sIterator =
+ { .pfnShow = MappingHistoryPrintAllWrapper };
+
+ eError = OSLockCreate(&gsMappingHistoryData.hLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error);
+
+ eError = CreateMappingRecords();
+ PVR_LOG_GOTO_IF_ERROR(eError, "CreateMappingRecords", error);
+
+ InitMappingRecordCB();
+
+ eError = DICreateEntry("cpumap_history", NULL, &sIterator, NULL,
+ DI_ENTRY_TYPE_GENERIC,
+ &gsMappingHistoryData.psDIEntry);
+ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error);
+
+ eError = PVRSRVRegisterDriverDbgRequestNotify(&gsMappingHistoryData.hDbgNotifier,
+ MappingHistoryDebugDump,
+ DEBUG_REQUEST_SRV,
+ NULL);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDriverDbgRequestNotify", error);
+
+ return PVRSRV_OK;
+
+error:
+ CPUMappingHistoryDeInit();
+ return eError;
+}
+
+/* Tear down everything CPUMappingHistoryInit() set up, in reverse order.
+ * Each step is guarded and the handle NULLed afterwards, so this is safe to
+ * call on a partially-initialised module (it doubles as Init's error path)
+ * and is idempotent. */
+void CPUMappingHistoryDeInit(void)
+{
+ if (gsMappingHistoryData.hDbgNotifier != NULL)
+ {
+ PVRSRVUnregisterDriverDbgRequestNotify(gsMappingHistoryData.hDbgNotifier);
+ gsMappingHistoryData.hDbgNotifier = NULL;
+ }
+
+ if (gsMappingHistoryData.psDIEntry != NULL)
+ {
+ DIDestroyEntry(gsMappingHistoryData.psDIEntry);
+ gsMappingHistoryData.psDIEntry = NULL;
+ }
+
+ if (gsMappingHistoryData.sRecords.pasMapRecordsCB)
+ {
+ DestroyMappingRecords();
+ gsMappingHistoryData.sRecords.pasMapRecordsCB = NULL;
+ }
+
+ if (gsMappingHistoryData.hLock != NULL)
+ {
+ OSLockDestroy(gsMappingHistoryData.hLock);
+ gsMappingHistoryData.hLock = NULL;
+ }
+}
+
+/* Record a CPU map event in the history CB. The annotation is truncated to
+ * MAX_MAPPING_ANNOT_STR-1 chars by OSStringLCopy (always NUL-terminated).
+ * NOTE(review): neither Insert function takes the history lock before
+ * AcquireMappingHistoryCBSlot() — verify callers serialise, otherwise
+ * concurrent inserts can race on the head/tail indices. */
+void InsertMappingRecord(const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPID,
+ IMG_CPU_VIRTADDR pvAddress,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_UINT32 ui32CPUCacheFlags,
+ size_t uiMapOffset,
+ IMG_UINT32 ui32PageCount)
+{
+ MAPPING_RECORD *psRecord;
+
+ psRecord = AcquireMappingHistoryCBSlot();
+ psRecord->etype = MAP;
+ psRecord->pfnRecordString = MapRecordString;
+
+ OSStringLCopy(psRecord->u.sMapData.aszAnnotation, pszAnnotation, MAX_MAPPING_ANNOT_STR);
+ psRecord->u.sMapData.uiPID = uiPID;
+ psRecord->u.sMapData.pvAddress = pvAddress;
+ psRecord->u.sMapData.ui32CPUCacheFlags = ui32CPUCacheFlags;
+ psRecord->u.sMapData.sCpuPhyAddr = sCpuPhyAddr;
+ psRecord->u.sMapData.uiMapOffset = uiMapOffset;
+ psRecord->u.sMapData.ui32PageCount = ui32PageCount;
+}
+
+/* Record a CPU unmap event in the history CB; counterpart to
+ * InsertMappingRecord (same locking caveat — see note above that function). */
+void InsertUnMappingRecord(IMG_CPU_VIRTADDR pvAddress,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 ui32PageCount)
+{
+ MAPPING_RECORD *psRecord;
+
+ psRecord = AcquireMappingHistoryCBSlot();
+ psRecord->etype = UNMAP;
+ psRecord->pfnRecordString = UnMapRecordString;
+
+ psRecord->u.sUnmapData.pvAddress = pvAddress;
+ psRecord->u.sUnmapData.ui32CPUCacheFlags = ui32CPUCacheFlags;
+ psRecord->u.sUnmapData.sCpuPhyAddr = sCpuPhyAddr;
+ psRecord->u.sUnmapData.ui32PageCount = ui32PageCount;
+}
static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
{
/* pfnCpuPAddrToDevPAddr */
- HostMemCpuPAddrToDevPAddr,
+ .pfnCpuPAddrToDevPAddr = HostMemCpuPAddrToDevPAddr,
/* pfnDevPAddrToCpuPAddr */
- HostMemDevPAddrToCpuPAddr,
+ .pfnDevPAddrToCpuPAddr = HostMemDevPAddrToCpuPAddr,
};
static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
/* early save return pointer to aid clean-up */
*ppsDeviceNode = psDeviceNode;
+ psDeviceNode->sDevId.ui32InternalID = PVRSRV_HOST_DEVICE_ID;
psDeviceNode->psDevConfig = psDevConfig;
- psDeviceNode->papsRegisteredPhysHeaps =
- OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
- psDevConfig->ui32PhysHeapCount);
- PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+ psDeviceNode->psPhysHeapList = NULL;
+
+ eError = OSLockCreate(&psDeviceNode->hPhysHeapLock);
+ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
&psDevConfig->pasPhysHeaps[0],
- &psDeviceNode->papsRegisteredPhysHeaps[0]);
+ NULL);
PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig");
- psDeviceNode->ui32RegisteredPhysHeaps = 1;
/* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_CPU_LOCAL,
- psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]);
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_CPU_LOCAL,
+ psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]);
PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire");
return PVRSRV_OK;
{
return;
}
-
- if (psDeviceNode->papsRegisteredPhysHeaps)
+ else
{
if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL])
{
PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]);
}
- if (psDeviceNode->papsRegisteredPhysHeaps[0])
- {
- /* clean-up function as well is aware of only one heap */
- PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1);
- PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[0]);
- }
-
- OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+ PhysHeapDestroyDeviceHeaps(psDeviceNode);
}
+
OSFreeMem(psDeviceNode);
}
#include "integrity_memobject.h"
#endif
+/* Assert that the conversions between the RA base type and the device
+ * physical address are safe.
+ */
+static_assert(sizeof(IMG_DEV_PHYADDR) == sizeof(RA_BASE_T),
+ "Size IMG_DEV_PHYADDR != RA_BASE_T");
+
/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid
* page address */
#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
+#define ZERO_PAGE_VALUE 0
+
+typedef struct _PMR_KERNEL_MAP_HANDLE_ {
+ void *vma;
+ void *pvKernelAddress;
+ /* uiSize has 2 uses:
+ * In Physically contiguous case it is used to track size of the mapping
+ * for free.
+ * In Physically sparse case it is used to determine free path to use, single page
+ * sparse mapping or multi page
+ */
+ size_t uiSize;
+} PMR_KERNEL_MAPPING;
typedef struct _PMR_LMALLOCARRAY_DATA_ {
IMG_PID uiPid;
- IMG_INT32 iNumPagesAllocated;
+
/*
- * uiTotalNumPages:
- * Total number of pages supported by this PMR.
- * (Fixed as of now due the fixed Page table array size)
+ * N.B Chunks referenced in this struct commonly are
+ * to OS page sized. But in reality it is dependent on
+ * the uiLog2ChunkSize.
+ * Chunks will always be one 1 << uiLog2ChunkSize in size.
+ * */
+
+ /*
+ * The number of chunks currently allocated in the PMR.
*/
- IMG_UINT32 uiTotalNumPages;
- IMG_UINT32 uiPagesToAlloc;
+ IMG_INT32 iNumChunksAllocated;
- IMG_UINT32 uiLog2AllocSize;
- IMG_UINT32 uiContigAllocSize;
- IMG_DEV_PHYADDR *pasDevPAddr;
+ /*
+ * Total number of (Virtual) chunks supported by this PMR.
+ */
+ IMG_UINT32 uiTotalNumChunks;
+
+ /* The number of chunks to next be allocated for the PMR.
+ * This will initially be the number allocated at first alloc
+ * but may be changed in later calls to change sparse.
+ * It represents the number of chunks to next be allocated.
+ * This is used to store this value because we have the ability to
+ * defer allocation.
+ */
+ IMG_UINT32 uiChunksToAlloc;
+
+ /*
+ * Log2 representation of the chunksize.
+ */
+ IMG_UINT32 uiLog2ChunkSize;
+
+ IMG_BOOL bIsSparse; /* Is the PMR sparse */
+ IMG_BOOL bPhysContig; /* Is the alloc Physically contiguous */
IMG_BOOL bZeroOnAlloc;
IMG_BOOL bPoisonOnAlloc;
direct-bridge originating calls
*/
CONNECTION_DATA *psConnection;
+
+ RA_BASE_ARRAY_T aBaseArray; /* Array of RA Bases */
+
} PMR_LMALLOCARRAY_DATA;
#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
#endif
typedef struct PHYSMEM_LMA_DATA_TAG {
- RA_ARENA *psRA;
-
- IMG_CPU_PHYADDR sStartAddr;
- IMG_DEV_PHYADDR sCardBase;
- IMG_UINT64 uiSize;
+ RA_ARENA *psRA;
+ IMG_CPU_PHYADDR sStartAddr;
+ IMG_DEV_PHYADDR sCardBase;
+ IMG_UINT64 uiSize;
} PHYSMEM_LMA_DATA;
/*
*pui64FreeSize = sRAUsageStats.ui64FreeArenaSize;
}
+#if !defined(SUPPORT_GPUVIRT_VALIDATION)
static PVRSRV_ERROR
PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap,
RA_ARENA **ppsArena)
return PVRSRV_OK;
}
+#endif
static PVRSRV_ERROR
-_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel)
+_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel, PHYS_HEAP_POLICY uiPolicy)
{
PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
+ IMG_UINT32 ui32RAPolicy =
+ ((uiPolicy & PHYS_HEAP_POLOCY_ALLOC_ALLOW_NONCONTIG_MASK) == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG)
+ ? RA_POLICY_ALLOC_ALLOW_NONCONTIG : RA_POLICY_DEFAULT;
+
psLMAData->psRA = RA_Create_With_Span(pszLabel,
OSGetPageShift(),
psLMAData->sStartAddr.uiAddr,
psLMAData->sCardBase.uiAddr,
- psLMAData->uiSize);
+ psLMAData->uiSize,
+ ui32RAPolicy);
PVR_LOG_RETURN_IF_NOMEM(psLMAData->psRA, "RA_Create_With_Span");
return PVRSRV_OK;
/* Remove RAs and RA names for local card memory */
if (psLMAData->psRA)
{
- OSFreeMem(psLMAData->psRA);
+ RA_Delete(psLMAData->psRA);
psLMAData->psRA = NULL;
}
}
};
PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode,
- PHYS_HEAP_USAGE_FLAGS ui32Flags,
+ PVRSRV_PHYS_HEAP ePhysHeap,
PHYS_HEAP_ITERATOR **ppsIter)
{
PVRSRV_ERROR eError;
PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter");
PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Flags != 0, "ui32Flags");
- eError = PhysHeapAcquireByUsage(ui32Flags, psDevNode, &psPhysHeap);
- PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByUsage");
+ eError = PhysHeapAcquireByID(ePhysHeap, psDevNode, &psPhysHeap);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByID");
PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA,
"PhysHeap must be of LMA type", release_heap);
RA_BASE_T uiCardAddr = 0;
RA_LENGTH_T uiActualSize;
PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Log2NumPages;
+
#if defined(DEBUG)
- static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ static IMG_UINT32 ui32MaxLog2NumPages = 7; /* 128 pages => 512KB */
+#else
+ static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */
+#endif
#endif /* defined(DEBUG) */
- IMG_UINT32 ui32Log2NumPages = 0;
-
PVR_ASSERT(uiSize != 0);
ui32Log2NumPages = OSGetOrder(uiSize);
uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
&uiActualSize,
NULL); /* No private handle */
+ if (eError != PVRSRV_OK)
+ {
+ RA_USAGE_STATS sRAStats;
+ RA_Get_Usage_Stats(pArena, &sRAStats);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "Failed to Allocate size = 0x"IMG_SIZE_FMTSPECX", align = 0x"
+ IMG_SIZE_FMTSPECX" Arena Free Space 0x%"IMG_UINT64_FMTSPECX,
+ uiSize, uiSize, sRAStats.ui64FreeArenaSize));
+ return eError;
+ }
+
PVR_ASSERT(uiSize == uiActualSize);
psMemHandle->u.ui64Handle = uiCardAddr;
psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
- if (PVRSRV_OK == eError)
- {
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
- PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
- uiSize,
- uiCardAddr,
- uiPid);
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ uiSize,
+ uiCardAddr,
+ uiPid);
#else
+ {
IMG_CPU_PHYADDR sCpuPAddr;
sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
- NULL,
- sCpuPAddr,
- uiSize,
- NULL,
- uiPid
- DEBUG_MEMSTATS_VALUES);
+ NULL,
+ sCpuPAddr,
+ uiSize,
+ uiPid
+ DEBUG_MEMSTATS_VALUES);
+ }
#endif
#endif
#if defined(SUPPORT_GPUVIRT_VALIDATION)
- PVR_DPF((PVR_DBG_MESSAGE,
- "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" IMG_UINT64_FMTSPECX ", Arena ID %u",
- __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid));
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%"
+ IMG_UINT64_FMTSPECX ", Arena ID %u",
+ __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid));
#endif
#if defined(DEBUG)
- PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages));
- if (ui32Log2NumPages > ui32MaxLog2NumPages)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__,
- ui32MaxLog2NumPages, ui32Log2NumPages ));
- ui32MaxLog2NumPages = ui32Log2NumPages;
- }
-#endif /* defined(DEBUG) */
- psMemHandle->uiOrder = ui32Log2NumPages;
+ PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages));
+ if (ui32Log2NumPages > ui32MaxLog2NumPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__,
+ ui32MaxLog2NumPages, ui32Log2NumPages ));
+ ui32MaxLog2NumPages = ui32Log2NumPages;
}
+#endif /* defined(DEBUG) */
+ psMemHandle->uiOrder = ui32Log2NumPages;
return eError;
}
*pvPtr,
sCpuPAddr,
ui32NumPages * OSGetPageSize(),
- NULL,
OSGetCurrentClientProcessIDKM()
DEBUG_MEMSTATS_VALUES);
}
PVRSRV_ERROR
PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode,
- PHYS_HEAP_CONFIG *psConfig,
- IMG_CHAR *pszLabel,
- PHYS_HEAP **ppsPhysHeap)
+ PHYS_HEAP_POLICY uiPolicy,
+ PHYS_HEAP_CONFIG *psConfig,
+ IMG_CHAR *pszLabel,
+ PHYS_HEAP **ppsPhysHeap)
{
PHYSMEM_LMA_DATA *psLMAData;
PVRSRV_ERROR eError;
psLMAData->sCardBase = psConfig->sCardBase;
psLMAData->uiSize = psConfig->uiSize;
-
eError = PhysHeapCreate(psDevNode,
psConfig,
+ uiPolicy,
(PHEAP_IMPL_DATA)psLMAData,
&_sPHEAPImplFuncs,
ppsPhysHeap);
return eError;
}
- eError = _CreateArenas(psLMAData, pszLabel);
+ eError = _CreateArenas(psLMAData, pszLabel, uiPolicy);
PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas");
return eError;
}
-static PVRSRV_ERROR _MapAlloc(PHYS_HEAP *psPhysHeap,
- IMG_DEV_PHYADDR *psDevPAddr,
- size_t uiSize,
- PMR_FLAGS_T ulFlags,
- void **pvPtr)
+static PVRSRV_ERROR _MapPhysicalContigAlloc(PHYS_HEAP *psPhysHeap,
+ RA_BASE_ARRAY_T paBaseArray,
+ size_t uiSize,
+ PMR_FLAGS_T ulFlags,
+ PMR_KERNEL_MAPPING *psMapping)
{
IMG_UINT32 ui32CPUCacheFlags;
- IMG_CPU_PHYADDR sCpuPAddr;
PVRSRV_ERROR eError;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ sDevPAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(*paBaseArray);
eError = DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags);
PVR_RETURN_IF_ERROR(eError);
- PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap,
+ 1,
+ &sCpuPAddr,
+ &sDevPAddr);
- *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
- PVR_RETURN_IF_NOMEM(*pvPtr);
+ psMapping->pvKernelAddress = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+ PVR_LOG_RETURN_IF_FALSE(psMapping->pvKernelAddress,
+ "OSMapPhyToLin: out of VM Mem",
+ PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING);
+ psMapping->vma = NULL;
+ psMapping->uiSize = uiSize;
return PVRSRV_OK;
}
-static void _UnMapAlloc(size_t uiSize,
- void *pvPtr)
+static PVRSRV_ERROR _MapPhysicalSparseAlloc(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData,
+ RA_BASE_ARRAY_T paBaseArray,
+ size_t uiSize,
+ PMR_FLAGS_T ulFlags,
+ PMR_KERNEL_MAPPING *psMapping)
{
- OSUnMapPhysToLin(pvPtr, uiSize);
+ IMG_UINT32 uiChunkCount = uiSize >> psLMAllocArrayData->uiLog2ChunkSize;
+ IMG_CPU_PHYADDR uiPages[PMR_MAX_TRANSLATION_STACK_ALLOC], *puiPages;
+ PVRSRV_ERROR eError;
+ size_t uiPageShift = OSGetPageShift();
+ IMG_UINT32 uiOSPageCnt = psLMAllocArrayData->uiLog2ChunkSize - uiPageShift;
+
+ if ((uiChunkCount << uiOSPageCnt) > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ puiPages = OSAllocZMem(sizeof(IMG_CPU_PHYADDR) * (uiChunkCount << uiOSPageCnt));
+ PVR_RETURN_IF_NOMEM(puiPages);
+ }
+ else
+ {
+ puiPages = &uiPages[0];
+ }
+
+ if (uiOSPageCnt == 0)
+ {
+ IMG_UINT32 i;
+ PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap,
+ uiChunkCount,
+ puiPages,
+ (IMG_DEV_PHYADDR *)paBaseArray);
+
+ /* If the ghost bit is present then the addrs returned will be off by 1
+ * Strip the ghost bit to correct to real page aligned addresses.
+ * */
+ for (i = 0; i < uiChunkCount; i++)
+ {
+ puiPages[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(puiPages[i].uiAddr);
+ }
+ }
+ else
+ {
+ IMG_UINT32 i = 0, j = 0, index = 0;
+ for (i = 0; i < uiChunkCount; i++)
+ {
+ IMG_UINT32 ui32OSPagesPerDeviceChunk = (1 << uiOSPageCnt);
+ IMG_DEV_PHYADDR uiDevAddr;
+ uiDevAddr.uiAddr = RA_BASE_STRIP_GHOST_BIT(paBaseArray[i]);
+ for (j = 0; j < ui32OSPagesPerDeviceChunk; j++)
+ {
+ uiDevAddr.uiAddr += (1ULL << uiPageShift);
+ PhysHeapDevPAddrToCpuPAddr(psLMAllocArrayData->psPhysHeap,
+ 1,
+ &puiPages[index],
+ &uiDevAddr);
+ index++;
+ }
+ }
+ }
+
+ eError = OSMapPhysArrayToLin(puiPages,
+ uiChunkCount,
+ &psMapping->pvKernelAddress,
+ &psMapping->vma);
+ if (eError == PVRSRV_OK)
+ {
+ psMapping->uiSize = uiSize;
+ }
+
+ if (puiPages != &uiPages[0])
+ {
+ OSFreeMem(puiPages);
+ }
+
+ return eError;
}
-static PVRSRV_ERROR
-_PoisonAlloc(PHYS_HEAP *psPhysHeap,
- IMG_DEV_PHYADDR *psDevPAddr,
- IMG_UINT32 uiContigAllocSize,
- IMG_BYTE ui8PoisonValue)
+static PVRSRV_ERROR _MapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData,
+ RA_BASE_ARRAY_T paBaseArray,
+ size_t uiSize,
+ PMR_FLAGS_T ulFlags,
+ PMR_KERNEL_MAPPING *psMapping)
{
PVRSRV_ERROR eError;
- void *pvKernLin = NULL;
-
- eError = _MapAlloc(psPhysHeap,
- psDevPAddr,
- uiContigAllocSize,
- PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
- &pvKernLin);
- PVR_GOTO_IF_ERROR(eError, map_failed);
+ PHYS_HEAP *psPhysHeap = psLMAllocArrayData->psPhysHeap;
+ if (!psLMAllocArrayData->bIsSparse)
+ {
+ /* Physically Contig */
+ if (psLMAllocArrayData->bPhysContig)
+ {
+ eError = _MapPhysicalContigAlloc(psPhysHeap,
+ paBaseArray,
+ uiSize,
+ ulFlags,
+ psMapping);
+ }
+ /* Physically Sparse */
+ else
+ {
+ eError = _MapPhysicalSparseAlloc(psLMAllocArrayData,
+ paBaseArray,
+ uiSize,
+ ulFlags,
+ psMapping);
+ }
+ }
+ else
+ {
+ /* Sparse Alloc Single Chunk */
+ if (uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize))
+ {
+ eError = _MapPhysicalContigAlloc(psPhysHeap,
+ paBaseArray,
+ uiSize,
+ ulFlags,
+ psMapping);
+ }
+ /* Sparse Alloc Multi Chunk */
+ else
+ {
+ eError = _MapPhysicalSparseAlloc(psLMAllocArrayData,
+ paBaseArray,
+ uiSize,
+ ulFlags,
+ psMapping);
+ }
+ }
- OSCachedMemSetWMB(pvKernLin, ui8PoisonValue, uiContigAllocSize);
+ return eError;
+}
- _UnMapAlloc(uiContigAllocSize, pvKernLin);
+static void _UnMapPhysicalContigAlloc(PMR_KERNEL_MAPPING *psKernelMapping)
+{
+ OSUnMapPhysToLin(psKernelMapping->pvKernelAddress, psKernelMapping->uiSize);
+}
- return PVRSRV_OK;
+static void _UnMapPhysicalSparseAlloc(PMR_KERNEL_MAPPING *psKernelMapping)
+{
+ OSUnMapPhysArrayToLin(psKernelMapping->pvKernelAddress,
+ psKernelMapping->vma);
+}
-map_failed:
- PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
- return eError;
+static void _UnMapPMRKernel(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData,
+ PMR_KERNEL_MAPPING *psKernelMapping)
+{
+ if (!psLMAllocArrayData->bIsSparse)
+ {
+ /* Physically Contig */
+ if (psLMAllocArrayData->bPhysContig)
+ {
+ _UnMapPhysicalContigAlloc(psKernelMapping);
+ }
+ /* Physically Sparse */
+ else
+ {
+ _UnMapPhysicalSparseAlloc(psKernelMapping);
+ }
+ }
+ else
+ {
+ /* Sparse Alloc Single Chunk */
+ if (psKernelMapping->uiSize == (1 << psLMAllocArrayData->uiLog2ChunkSize))
+ {
+ _UnMapPhysicalContigAlloc(psKernelMapping);
+ }
+ /* Sparse Alloc Multi Chunk */
+ else
+ {
+ _UnMapPhysicalSparseAlloc(psKernelMapping);
+ }
+ }
}
static PVRSRV_ERROR
-_ZeroAlloc(PHYS_HEAP *psPhysHeap,
- IMG_DEV_PHYADDR *psDevPAddr,
- IMG_UINT32 uiContigAllocSize)
+_PhysPgMemSet(PMR_LMALLOCARRAY_DATA *psLMAllocArrayData,
+ RA_BASE_ARRAY_T paBaseArray,
+ size_t uiSize,
+ IMG_BYTE ui8SetValue)
{
- void *pvKernLin = NULL;
PVRSRV_ERROR eError;
+ PMR_KERNEL_MAPPING sKernelMapping;
- eError = _MapAlloc(psPhysHeap,
- psDevPAddr,
- uiContigAllocSize,
- PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
- &pvKernLin);
+ eError = _MapPMRKernel(psLMAllocArrayData,
+ paBaseArray,
+ uiSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+ &sKernelMapping);
PVR_GOTO_IF_ERROR(eError, map_failed);
- OSCachedMemSetWMB(pvKernLin, 0, uiContigAllocSize);
+ OSCachedMemSetWMB(sKernelMapping.pvKernelAddress, ui8SetValue, uiSize);
- _UnMapAlloc(uiContigAllocSize, pvKernLin);
+ _UnMapPMRKernel(psLMAllocArrayData, &sKernelMapping);
return PVRSRV_OK;
map_failed:
- PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison/zero allocation"));
return eError;
}
_AllocLMPageArray(PMR_SIZE_T uiSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
- IMG_UINT32 *pabMappingTable,
IMG_UINT32 uiLog2AllocPageSize,
IMG_BOOL bZero,
IMG_BOOL bPoisonOnAlloc,
IMG_BOOL bPoisonOnFree,
- IMG_BOOL bContig,
+ IMG_BOOL bIsSparse,
IMG_BOOL bOnDemand,
PHYS_HEAP* psPhysHeap,
PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
IMG_PID uiPid,
PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr,
- CONNECTION_DATA *psConnection
- )
+ CONNECTION_DATA *psConnection)
{
PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
- IMG_UINT32 ui32Index;
PVRSRV_ERROR eError;
+ IMG_UINT32 uiNumPages;
PVR_ASSERT(!bZero || !bPoisonOnAlloc);
PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize);
- psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+ /* Use of cast below is justified by the assertion that follows to
+ prove that no significant bits have been truncated */
+ uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1);
+ PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize);
+
+ psPageArrayData = OSAllocMem(sizeof(PMR_LMALLOCARRAY_DATA) + (sizeof(RA_BASE_T) * uiNumPages));
PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray);
- if (bContig)
+ if (bIsSparse)
{
- /*
- Some allocations require kernel mappings in which case in order
- to be virtually contiguous we also have to be physically contiguous.
- */
- psPageArrayData->uiTotalNumPages = 1;
- psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
- psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
- psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+ /* No chunks are allocated yet: fill the base array with 0xFF bytes so every entry reads as INVALID_BASE_ADDR */
+ OSCachedMemSet(psPageArrayData->aBaseArray,
+ 0xFF,
+ sizeof(RA_BASE_T) *
+ uiNumPages);
}
else
{
- IMG_UINT32 uiNumPages;
-
- /* Use of cast below is justified by the assertion that follows to
- prove that no significant bits have been truncated */
- uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1);
- PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize);
+ /* Base pointers have been allocated for the full PMR in case we require a non
+ * physically contiguous backing for the virtually contiguous allocation but the most
+ * common case will be contiguous and so only require the first Base to be present
+ */
+ psPageArrayData->aBaseArray[0] = INVALID_BASE_ADDR;
+ }
- psPageArrayData->uiTotalNumPages = uiNumPages;
+ psPageArrayData->uiTotalNumChunks = uiNumPages;
+ psPageArrayData->uiChunksToAlloc = bIsSparse ? ui32NumPhysChunks : uiNumPages;
+ psPageArrayData->uiLog2ChunkSize = uiLog2AllocPageSize;
- if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks))
- {
- psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
- }
- else
- {
- psPageArrayData->uiPagesToAlloc = uiNumPages;
- }
- psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize;
- psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
- }
psPageArrayData->psConnection = psConnection;
psPageArrayData->uiPid = uiPid;
- psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
- psPageArrayData->uiTotalNumPages);
- PVR_GOTO_IF_NOMEM(psPageArrayData->pasDevPAddr, eError, errorOnAllocAddr);
-
- /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */
- for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++)
- {
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
- }
-
- psPageArrayData->iNumPagesAllocated = 0;
+ psPageArrayData->iNumChunksAllocated = 0;
+ psPageArrayData->bIsSparse = bIsSparse;
+ psPageArrayData->bPhysContig = IMG_FALSE;
psPageArrayData->bZeroOnAlloc = bZero;
psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
psPageArrayData->bPoisonOnFree = bPoisonOnFree;
return PVRSRV_OK;
- /*
- error exit paths follow:
- */
-errorOnAllocAddr:
- OSFreeMem(psPageArrayData);
+/*
+ error exit path follows:
+*/
errorOnAllocArray:
PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
-
static PVRSRV_ERROR
-_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+_AllocLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData)
{
PVRSRV_ERROR eError;
- RA_BASE_T uiCardAddr;
- RA_LENGTH_T uiActualSize;
- IMG_UINT32 i, ui32Index = 0;
- IMG_UINT32 uiContigAllocSize;
- IMG_UINT32 uiLog2AllocSize;
- PVRSRV_DEVICE_NODE *psDevNode;
- IMG_BOOL bPoisonOnAlloc;
- IMG_BOOL bZeroOnAlloc;
- RA_ARENA *pArena;
-
- PVR_ASSERT(NULL != psPageArrayData);
- PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
-
- psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap);
- uiContigAllocSize = psPageArrayData->uiContigAllocSize;
- uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
- bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
- bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize;
+ IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize;
+ IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+ IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
- /* Get suitable local memory region for this GPU physheap allocation */
- eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena);
- PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA");
- if (psPageArrayData->uiTotalNumPages <
- (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+ eError = RA_AllocMulti(psPageArrayData->psArena,
+ uiPhysSize,
+ uiLog2ChunkSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ "LMA_Page_Alloc",
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks,
+ &psPageArrayData->bPhysContig);
+ if (PVRSRV_OK != eError)
{
- PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. "
- "Allocated: %u + Requested: %u > Total Allowed: %u",
- psPageArrayData->iNumPagesAllocated,
- psPageArrayData->uiPagesToAlloc,
- psPageArrayData->uiTotalNumPages));
- return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
- }
+ RA_USAGE_STATS sRAStats;
+ IMG_CHAR *pszArenaName;
+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats);
+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena);
+ PVR_DPF((PVR_DBG_ERROR,
+ "Contig: Failed to Allocate size = 0x%llx, align = 0x%llx"
+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX""
+ " Arena Name: '%s'",
+ (unsigned long long)uiPhysSize,
+ 1ULL << uiLog2ChunkSize,
+ sRAStats.ui64FreeArenaSize,
+ pszArenaName));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc);
+ }
#if defined(SUPPORT_GPUVIRT_VALIDATION)
- {
- IMG_UINT32 ui32OSid=0;
-
- /* Obtain the OSid specific data from our connection handle */
- if (psPageArrayData->psConnection != NULL)
- {
- ui32OSid = psPageArrayData->psConnection->ui32OSid;
- }
-
- if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags))
- {
- pArena=psDevNode->psOSSharedArena;
- PVR_DPF((PVR_DBG_MESSAGE,
- "(GPU Virtualization Validation): Giving from shared mem"));
- }
- else
- {
- pArena=psDevNode->psOSidSubArena[ui32OSid];
- PVR_DPF((PVR_DBG_MESSAGE,
- "(GPU Virtualization Validation): Giving from OS slot %d",
- ui32OSid));
- }
- }
+{
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX,
+ psPageArrayData->aBaseArray[0]));
+}
#endif
- psPageArrayData->psArena = pArena;
-
- for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++)
+ if (bPoisonOnAlloc)
{
- /* This part of index finding should happen before allocating the page.
- * Just avoiding intricate paths */
- if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
- {
- ui32Index = i;
- }
- else
- {
- if (NULL == pui32MapTable)
- {
- PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc);
- }
+ eError = _PhysPgMemSet(psPageArrayData,
+ psPageArrayData->aBaseArray,
+ uiPhysSize,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison);
+ }
- ui32Index = pui32MapTable[i];
- if (ui32Index >= psPageArrayData->uiTotalNumPages)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Page alloc request Index out of bounds for PMR @0x%p",
- __func__,
- psPageArrayData));
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, errorOnRAAlloc);
- }
+ if (bZeroOnAlloc)
+ {
+ eError = _PhysPgMemSet(psPageArrayData,
+ psPageArrayData->aBaseArray,
+ uiPhysSize,
+ ZERO_PAGE_VALUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero);
+ }
- if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
- {
- PVR_LOG_GOTO_WITH_ERROR("Mapping already exists", eError, PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS, errorOnRAAlloc);
- }
- }
+ psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc;
- eError = RA_Alloc(pArena,
- uiContigAllocSize,
- RA_NO_IMPORT_MULTIPLIER,
- 0, /* No flags */
- 1ULL << uiLog2AllocSize,
- "LMA_Page_Alloc",
- &uiCardAddr,
- &uiActualSize,
- NULL); /* No private handle */
- if (PVRSRV_OK != eError)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "Failed to Allocate the page @index:%d, size = 0x%llx",
- ui32Index, 1ULL << uiLog2AllocSize));
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc);
- }
+ /* The requested chunks have now been allocated; reset the count to 0 for bookkeeping */
+ psPageArrayData->uiChunksToAlloc = 0;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
-{
- PVR_DPF((PVR_DBG_MESSAGE,
- "(GPU Virtualization Validation): Address: 0x%"IMG_UINT64_FMTSPECX,
- uiCardAddr));
-}
-#endif
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
- /* Allocation is done a page at a time */
- PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid);
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid);
#else
+ if (psPageArrayData->bPhysContig)
+ {
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+ sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[0];
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ NULL,
+ sLocalCpuPAddr,
+ psPageArrayData->uiTotalNumChunks << uiLog2ChunkSize,
+ psPageArrayData->uiPid
+ DEBUG_MEMSTATS_VALUES);
+ }
+ else
+ {
+ IMG_UINT32 i, j;
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+ for (i = 0; i < psPageArrayData->uiTotalNumChunks;)
{
- IMG_CPU_PHYADDR sLocalCpuPAddr;
+ IMG_UINT32 ui32AllocSizeInChunks = 1;
+
+ for (j = i;
+ j + 1 != psPageArrayData->uiTotalNumChunks &&
+ RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]);
+ j++)
+ {
+ ui32AllocSizeInChunks++;
+ }
- sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+ sLocalCpuPAddr.uiAddr = (IMG_UINT64) psPageArrayData->aBaseArray[i];
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
NULL,
sLocalCpuPAddr,
- uiActualSize,
- NULL,
+ ui32AllocSizeInChunks << uiLog2ChunkSize,
psPageArrayData->uiPid
DEBUG_MEMSTATS_VALUES);
- }
-#endif
-#endif
-
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
- if (bPoisonOnAlloc)
- {
- eError = _PoisonAlloc(psPageArrayData->psPhysHeap,
- &psPageArrayData->pasDevPAddr[ui32Index],
- uiContigAllocSize,
- PVRSRV_POISON_ON_ALLOC_VALUE);
- PVR_LOG_GOTO_IF_ERROR(eError, "_PoisonAlloc", errorOnPoison);
- }
- if (bZeroOnAlloc)
- {
- eError = _ZeroAlloc(psPageArrayData->psPhysHeap,
- &psPageArrayData->pasDevPAddr[ui32Index],
- uiContigAllocSize);
- PVR_LOG_GOTO_IF_ERROR(eError, "_ZeroAlloc", errorOnZero);
+ i += ui32AllocSizeInChunks;
}
}
- psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+#endif
+#endif
return PVRSRV_OK;
errorOnZero:
errorOnPoison:
eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
-errorOnRAAlloc:
- PVR_DPF((PVR_DBG_ERROR,
- "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)",
- __func__,
- ui32Index,
- i,
- psPageArrayData->uiPagesToAlloc,
- PVRSRVGetErrorString(eError)));
- while (--i < psPageArrayData->uiPagesToAlloc)
- {
- if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
- {
- ui32Index = i;
- }
- else
- {
- if (NULL == pui32MapTable)
- {
- break;
- }
- ui32Index = pui32MapTable[i];
- }
+ RA_FreeMulti(psPageArrayData->psArena,
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks);
- if (ui32Index < psPageArrayData->uiTotalNumPages)
- {
-#if defined(PVRSRV_ENABLE_PROCESS_STATS)
-#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
- /* Allocation is done a page at a time */
- PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
- uiContigAllocSize,
- psPageArrayData->uiPid);
-#else
- {
- PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
- psPageArrayData->uiPid);
- }
-#endif
+errorOnRAAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ * Fully-allocated variant of sparse allocation; it does not take an array of
+ * indices as an argument. It is used when the number of chunks to allocate
+ * equals the total the PMR can represent, i.e. when we want to fully populate
+ * a sparse PMR.
+ */
+static PVRSRV_ERROR
+_AllocLMPagesSparseFull(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize;
+ IMG_UINT64 uiPhysSize = (IMG_UINT64) psPageArrayData->uiChunksToAlloc << uiLog2ChunkSize;
+ IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+ IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+
+ eError = RA_AllocMultiSparse(psPageArrayData->psArena,
+ uiLog2ChunkSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ "LMA_Page_Alloc",
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks,
+ NULL, /* No indices given meaning allocate full base array using chunk count below */
+ psPageArrayData->uiChunksToAlloc);
+ if (PVRSRV_OK != eError)
+ {
+ RA_USAGE_STATS sRAStats;
+ IMG_CHAR *pszArenaName;
+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats);
+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "SparseFull: Failed to Allocate size = 0x%llx, align = 0x%llx"
+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX""
+ " Arena Name: '%s'",
+ (unsigned long long)uiPhysSize,
+ 1ULL << uiLog2ChunkSize,
+ sRAStats.ui64FreeArenaSize,
+ pszArenaName));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc);
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX,
+ psPageArrayData->aBaseArray[0]));
+}
#endif
- RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+
+ if (bPoisonOnAlloc)
+ {
+ eError = _PhysPgMemSet(psPageArrayData,
+ psPageArrayData->aBaseArray,
+ uiPhysSize,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoison);
+ }
+
+ if (bZeroOnAlloc)
+ {
+ eError = _PhysPgMemSet(psPageArrayData,
+ psPageArrayData->aBaseArray,
+ uiPhysSize,
+ ZERO_PAGE_VALUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnZero);
+ }
+
+ psPageArrayData->iNumChunksAllocated += psPageArrayData->uiChunksToAlloc;
+
+ /* The requested chunks have now been allocated; reset the count to 0 for bookkeeping */
+ psPageArrayData->uiChunksToAlloc = 0;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiPhysSize, psPageArrayData->uiPid);
+#else
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++)
+ {
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+ sLocalCpuPAddr.uiAddr =
+ (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[i]);
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ NULL,
+ sLocalCpuPAddr,
+ 1 << uiLog2ChunkSize,
+ psPageArrayData->uiPid
+ DEBUG_MEMSTATS_VALUES);
}
}
+#endif
+#endif
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+errorOnZero:
+errorOnPoison:
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+
+ RA_FreeMulti(psPageArrayData->psArena,
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks);
+
+errorOnRAAlloc:
PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
static PVRSRV_ERROR
-_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+_AllocLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
{
- OSFreeMem(psPageArrayData->pasDevPAddr);
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize;
+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize;
+ IMG_UINT32 uiChunksToAlloc = psPageArrayData->uiChunksToAlloc;
+ IMG_BOOL bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+ IMG_BOOL bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
+ if (!pui32MapTable)
+ {
+ PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc);
+ }
+#if defined(DEBUG)
+ /*
+ * This block performs validation of the mapping table input in the following ways:
+ * Check that each index in the mapping table does not exceed the number of the chunks
+ * the whole PMR supports.
+ * Check that each index given by the mapping table is not already allocated.
+ * Check that there are no duplicated indices given in the mapping table.
+ */
+ {
+ IMG_UINT32 i;
+ IMG_BOOL bIssueDetected = IMG_FALSE;
+ PVRSRV_ERROR eMapCheckError;
+
+ for (i = 0; i < uiChunksToAlloc; i++)
+ {
+ if (pui32MapTable[i] >= psPageArrayData->uiTotalNumChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Page alloc request Index out of bounds for PMR @0x%p",
+ __func__,
+ psPageArrayData));
+ eMapCheckError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ bIssueDetected = IMG_TRUE;
+ break;
+ }
+
+ if (!RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[pui32MapTable[i]]))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping already exists Index %u Mapping index %u",
+ __func__,
+ i,
+ pui32MapTable[i]));
+ eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ bIssueDetected = IMG_TRUE;
+ break;
+ }
+
+ if (RA_BASE_IS_SPARSE_PREP(psPageArrayData->aBaseArray[pui32MapTable[i]]))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping already exists in mapping table given Index %u Mapping index %u",
+ __func__,
+ i,
+ pui32MapTable[i]));
+ eMapCheckError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ bIssueDetected = IMG_TRUE;
+ break;
+ }
+ else
+ {
+ /* Set the To Prep value so we can detect duplicated map indices */
+ psPageArrayData->aBaseArray[pui32MapTable[i]] = RA_BASE_SPARSE_PREP_ALLOC_ADDR;
+ }
+ }
+ /* Unwind the Alloc Prep Values */
+ if (bIssueDetected)
+ {
+ /* We don't want to affect the index of the issue seen
+ * as it could be a valid mapping. If it is a duplicated
+ * mapping in the given table then we will clean-up the
+ * previous instance anyway.
+ */
+ IMG_UINT32 uiUnwind = i;
+
+ for (i = 0; i < uiUnwind; i++)
+ {
+ psPageArrayData->aBaseArray[pui32MapTable[i]] = INVALID_BASE_ADDR;
+ }
+
+ PVR_GOTO_WITH_ERROR(eError, eMapCheckError, errorOnRAAlloc);
+ }
+ }
+#endif
+
+ eError = RA_AllocMultiSparse(psPageArrayData->psArena,
+ psPageArrayData->uiLog2ChunkSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ "LMA_Page_Alloc",
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks,
+ pui32MapTable,
+ uiChunksToAlloc);
+ if (PVRSRV_OK != eError)
+ {
+ RA_USAGE_STATS sRAStats;
+ IMG_CHAR *pszArenaName;
+ RA_Get_Usage_Stats(psPageArrayData->psArena, &sRAStats);
+ pszArenaName = RA_GetArenaName(psPageArrayData->psArena);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "Sparse: Failed to Allocate size = 0x%llx, align = 0x%llx"
+ " Arena Free Space 0x%"IMG_UINT64_FMTSPECX""
+ " Arena Name: '%s'",
+ (unsigned long long) uiChunksToAlloc << uiLog2ChunkSize,
+ 1ULL << uiLog2ChunkSize,
+ sRAStats.ui64FreeArenaSize,
+ pszArenaName));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc);
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
PVR_DPF((PVR_DBG_MESSAGE,
- "physmem_lma.c: freed local memory array structure for PMR @0x%p",
- psPageArrayData));
+ "(GPU Virtualization Validation): First RealBase: %"IMG_UINT64_FMTSPECX,
+ psPageArrayData->aBaseArray[pui32MapTable[0]]));
+}
+#endif
- OSFreeMem(psPageArrayData);
+ if (bPoisonOnAlloc || bZeroOnAlloc)
+ {
+ IMG_UINT32 i, ui32Index = 0;
+ for (i = 0; i < uiChunksToAlloc; i++)
+ {
+ ui32Index = pui32MapTable[i];
+
+ eError = _PhysPgMemSet(psPageArrayData,
+ &psPageArrayData->aBaseArray[ui32Index],
+ uiChunkSize,
+ bPoisonOnAlloc ? PVRSRV_POISON_ON_ALLOC_VALUE :
+ ZERO_PAGE_VALUE);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_PhysPgMemSet", errorOnPoisonZero);
+ }
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ uiChunksToAlloc << uiLog2ChunkSize,
+ psPageArrayData->uiPid);
+#else
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psPageArrayData->uiChunksToAlloc; i++)
+ {
+ IMG_UINT32 ui32Index = pui32MapTable[i];
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+ sLocalCpuPAddr.uiAddr =
+ (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[ui32Index]);
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ NULL,
+ sLocalCpuPAddr,
+ uiChunkSize,
+ psPageArrayData->uiPid
+ DEBUG_MEMSTATS_VALUES);
+ }
+ }
+#endif
+#endif
+
+ psPageArrayData->iNumChunksAllocated += uiChunksToAlloc;
+
+ /* The requested chunks have now been allocated; reset the count to 0 for bookkeeping */
+ psPageArrayData->uiChunksToAlloc = 0;
return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+errorOnPoisonZero:
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+
+ RA_FreeMultiSparse(psPageArrayData->psArena,
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks,
+ psPageArrayData->uiLog2ChunkSize,
+ pui32MapTable,
+ &uiChunksToAlloc);
+
+errorOnRAAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+
}
static PVRSRV_ERROR
-_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
- IMG_UINT32 *pui32FreeIndices,
- IMG_UINT32 ui32FreePageCount)
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
{
- IMG_UINT32 uiContigAllocSize;
- IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0;
- RA_ARENA *pArena = psPageArrayData->psArena;
+ PVRSRV_ERROR eError;
+ RA_ARENA *pArena;
- PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+ PVR_ASSERT(NULL != psPageArrayData);
+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated);
- uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+ if (psPageArrayData->uiTotalNumChunks <
+ (psPageArrayData->iNumChunksAllocated + psPageArrayData->uiChunksToAlloc))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. "
+ "Allocated: %u + Requested: %u > Total Allowed: %u",
+ psPageArrayData->iNumChunksAllocated,
+ psPageArrayData->uiChunksToAlloc,
+ psPageArrayData->uiTotalNumChunks));
+ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ }
- ui32PagesToFree = (NULL == pui32FreeIndices) ?
- psPageArrayData->uiTotalNumPages : ui32FreePageCount;
+ /* If we have a non-backed sparse PMR then we can just return */
+ if (psPageArrayData->uiChunksToAlloc == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Non-Backed Sparse PMR Created: %p.",
+ __func__,
+ psPageArrayData));
+ return PVRSRV_OK;
+ }
- for (i = 0; i < ui32PagesToFree; i++)
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
{
- if (NULL == pui32FreeIndices)
+ IMG_UINT32 ui32OSid=0;
+ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap);
+
+ /* Obtain the OSid specific data from our connection handle */
+ if (psPageArrayData->psConnection != NULL)
+ {
+ ui32OSid = psPageArrayData->psConnection->ui32OSid;
+ }
+
+ if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags))
{
- ui32Index = i;
+ pArena=psDevNode->psOSSharedArena;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "(GPU Virtualization Validation): Giving from shared mem"));
}
else
{
- ui32Index = pui32FreeIndices[i];
+ pArena=psDevNode->psOSidSubArena[ui32OSid];
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "(GPU Virtualization Validation): Giving from OS slot %d",
+ ui32OSid));
}
+ }
+#else
+ /* Get suitable local memory region for this GPU physheap allocation */
+ eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA");
+#endif
+
+ psPageArrayData->psArena = pArena;
- if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+ /*
+ * 3 cases:
+ * Sparse allocation populating the whole PMR.
+ * [**********]
+ * Sparse allocation partially populating the PMR at given indices.
+ * [*** *** **]
+ * Contiguous allocation.
+ * [**********]
+ *
+ * Note: Separate cases are required for 1 and 3 due to memstats tracking.
+ * In Contiguous case we can track the block as a single memstat record as we know
+ * we will also free in that size record.
+ * Sparse allocations require a memstat record per chunk as they can be arbitrarily
+ * free'd.
+ */
+ if (psPageArrayData->bIsSparse)
+ {
+ if (psPageArrayData->uiTotalNumChunks == psPageArrayData->uiChunksToAlloc &&
+ !pui32MapTable)
{
- ui32PagesFreed++;
- if (psPageArrayData->bPoisonOnFree)
- {
- _PoisonAlloc(psPageArrayData->psPhysHeap,
- &psPageArrayData->pasDevPAddr[ui32Index],
- uiContigAllocSize,
- PVRSRV_POISON_ON_FREE_VALUE);
- }
+ eError = _AllocLMPagesSparseFull(psPageArrayData);
+ }
+ else
+ {
+ eError = _AllocLMPagesSparse(psPageArrayData, pui32MapTable);
+ }
+ }
+ else
+ {
+ eError = _AllocLMPagesContig(psPageArrayData);
+ }
+
+ return eError;
+}
+
+static void
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "physmem_lma.c: freed local memory array structure for PMR @0x%p",
+ psPageArrayData));
+
+ OSFreeMem(psPageArrayData);
+}
+
+static PVRSRV_ERROR
+_FreeLMPagesContig(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ RA_ARENA *pArena = psPageArrayData->psArena;
+ IMG_UINT64 uiPhysSize =
+ (IMG_UINT64) psPageArrayData->uiTotalNumChunks << psPageArrayData->uiLog2ChunkSize;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0);
+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated ==
+ psPageArrayData->uiTotalNumChunks);
- RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ eError = _PhysPgMemSet(psPageArrayData,
+ psPageArrayData->aBaseArray,
+ uiPhysSize,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet");
+ }
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
- /* Allocation is done a page at a time */
- PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
- uiContigAllocSize,
- psPageArrayData->uiPid);
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ uiPhysSize,
+ psPageArrayData->uiPid);
#else
+ if (psPageArrayData->bPhysContig)
+ {
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ (IMG_UINT64) psPageArrayData->aBaseArray[0],
+ psPageArrayData->uiPid);
+ }
+ else
+ {
+ IMG_UINT32 i;
+
+ for (i = 0; i < psPageArrayData->uiTotalNumChunks; i++)
+ {
+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i]))
{
PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
- psPageArrayData->uiPid);
+ (IMG_UINT64) psPageArrayData->aBaseArray[i],
+ psPageArrayData->uiPid);
}
+ }
+
+ }
+#endif
+#endif
+
+ if (psPageArrayData->bPhysContig)
+ {
+ eError = RA_FreeMulti(pArena,
+ psPageArrayData->aBaseArray,
+ 1);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti");
+ }
+ else
+ {
+ eError = RA_FreeMulti(pArena,
+ psPageArrayData->aBaseArray,
+ psPageArrayData->iNumChunksAllocated);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti");
+ }
+
+ psPageArrayData->iNumChunksAllocated = 0;
+
+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: freed %"IMG_UINT64_FMTSPEC" local memory for PMR @0x%p",
+ __func__,
+ uiPhysSize,
+ psPageArrayData));
+
+ return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPagesRemainingSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiChunkSize = 1ULL << psPageArrayData->uiLog2ChunkSize;
+ IMG_BOOL bPoisonOnFree = psPageArrayData->bPoisonOnFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ psPageArrayData->iNumChunksAllocated << psPageArrayData->uiLog2ChunkSize,
+ psPageArrayData->uiPid);
#endif
+
+ for (i = 0; i < psPageArrayData->uiTotalNumChunks;)
+ {
+ if (RA_BASE_IS_REAL(psPageArrayData->aBaseArray[i]))
+ {
+ IMG_UINT32 j;
+ IMG_UINT32 ui32AccumulatedChunks = 1;
+
+ for (j = i;
+ j + 1 != psPageArrayData->uiTotalNumChunks &&
+ RA_BASE_IS_GHOST(psPageArrayData->aBaseArray[j + 1]);
+ j++)
+ {
+ ui32AccumulatedChunks++;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+ for (j = i; j < (i + ui32AccumulatedChunks); j++)
+ {
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ RA_BASE_STRIP_GHOST_BIT(psPageArrayData->aBaseArray[j]),
+ psPageArrayData->uiPid);
+ if (bPoisonOnFree)
+#else
+ for (j = i; j < (i + ui32AccumulatedChunks) && bPoisonOnFree; j++)
+ {
#endif
- psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+ {
+ eError = _PhysPgMemSet(psPageArrayData,
+ &psPageArrayData->aBaseArray[j],
+ uiChunkSize,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet");
+ }
+ }
+
+ eError = RA_FreeMulti(psPageArrayData->psArena,
+ &psPageArrayData->aBaseArray[i],
+ ui32AccumulatedChunks);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMulti");
+
+ psPageArrayData->iNumChunksAllocated -= ui32AccumulatedChunks;
+ i += ui32AccumulatedChunks;
+ }
+ else if (RA_BASE_IS_INVALID(psPageArrayData->aBaseArray[i]))
+ {
+ i++;
}
}
- psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
- PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+ /* We have freed all allocations in the previous loop */
+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPagesSparse(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pui32FreeIndices,
+ IMG_UINT32 ui32FreeChunkCount)
+{
+ RA_ARENA *pArena = psPageArrayData->psArena;
+ IMG_UINT32 uiLog2ChunkSize = psPageArrayData->uiLog2ChunkSize;
+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize;
+ IMG_BOOL bPoisonOnFree = psPageArrayData->bPoisonOnFree;
+ IMG_UINT32 uiActualFreeCount = ui32FreeChunkCount;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPageArrayData->iNumChunksAllocated != 0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ IMG_UINT32 i;
+ for (i = 0; i < ui32FreeChunkCount; i++)
+ {
+ IMG_UINT32 ui32Index = pui32FreeIndices[i];
+
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ (IMG_UINT64) RA_BASE_STRIP_GHOST_BIT(
+ psPageArrayData->aBaseArray[ui32Index]),
+ psPageArrayData->uiPid);
+ }
+ }
+#endif
+
+ if (bPoisonOnFree)
+ {
+ IMG_UINT32 i, ui32Index = 0;
+ for (i = 0; i < ui32FreeChunkCount; i++)
+ {
+ ui32Index = pui32FreeIndices[i];
+
+ eError = _PhysPgMemSet(psPageArrayData,
+ &psPageArrayData->aBaseArray[ui32Index],
+ uiChunkSize,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ PVR_LOG_IF_ERROR(eError, "_PhysPgMemSet");
+ }
+ }
+
+ eError = RA_FreeMultiSparse(pArena,
+ psPageArrayData->aBaseArray,
+ psPageArrayData->uiTotalNumChunks,
+ uiLog2ChunkSize,
+ pui32FreeIndices,
+ &uiActualFreeCount);
+ psPageArrayData->iNumChunksAllocated -= uiActualFreeCount;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ uiActualFreeCount << psPageArrayData->uiLog2ChunkSize,
+ psPageArrayData->uiPid);
+#endif
+ if (eError == PVRSRV_ERROR_RA_FREE_INVALID_CHUNK)
+ {
+ /* Log the RA error but convert it to PMR level to match the interface,
+ * this is important because other PMR factories may not use the RA but
+ * still return error, returning a PMR based error
+ * keeps the interface agnostic to implementation behaviour.
+ */
+ PVR_LOG_IF_ERROR(eError, "RA_FreeMultiSparse");
+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK;
+ }
+ PVR_LOG_RETURN_IF_ERROR(eError, "RA_FreeMultiSparse");
+
+ PVR_ASSERT(0 <= psPageArrayData->iNumChunksAllocated);
+
PVR_DPF((PVR_DBG_MESSAGE,
"%s: freed %d local memory for PMR @0x%p",
__func__,
- (ui32PagesFreed * uiContigAllocSize),
+ (uiActualFreeCount * uiChunkSize),
psPageArrayData));
return PVRSRV_OK;
}
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pui32FreeIndices,
+ IMG_UINT32 ui32FreeChunkCount)
+{
+ PVRSRV_ERROR eError;
+
+ if (psPageArrayData->bIsSparse)
+ {
+ if (!pui32FreeIndices)
+ {
+ eError = _FreeLMPagesRemainingSparse(psPageArrayData);
+ }
+ else
+ {
+ eError = _FreeLMPagesSparse(psPageArrayData, pui32FreeIndices, ui32FreeChunkCount);
+ }
+ }
+ else
+ {
+ eError = _FreeLMPagesContig(psPageArrayData);
+ }
+
+ return eError;
+}
+
/*
*
* Implementation of callback functions
/* destructor func is called after last reference disappears, but
before PMR itself is freed. */
-static PVRSRV_ERROR
+static void
PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
{
PVRSRV_ERROR eError;
psLMAllocArrayData = pvPriv;
/* We can't free pages until now. */
- if (psLMAllocArrayData->iNumPagesAllocated != 0)
+ if (psLMAllocArrayData->iNumChunksAllocated != 0)
{
#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
mutex_unlock(&g_sLMALeakMutex);
PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv));
- return PVRSRV_OK;
+ return;
}
mutex_unlock(&g_sLMALeakMutex);
#endif
eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
- PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ PVR_LOG_IF_ERROR(eError, "_FreeLMPages");
+ PVR_ASSERT (eError == PVRSRV_OK);
}
- eError = _FreeLMPageArray(psLMAllocArrayData);
- PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
-
- return PVRSRV_OK;
+ _FreeLMPageArray(psLMAllocArrayData);
}
/* callback function for locking the system physical page addresses.
IMG_BOOL *pbValid,
IMG_DEV_PHYADDR *psDevPAddr)
{
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
IMG_UINT32 idx;
IMG_UINT32 uiLog2AllocSize;
- IMG_UINT32 uiNumAllocs;
IMG_UINT64 uiAllocIndex;
IMG_DEVMEM_OFFSET_T uiInAllocOffset;
- PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+ IMG_UINT32 uiNumAllocs = psLMAllocArrayData->uiTotalNumChunks;
- if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+ if (psLMAllocArrayData->uiLog2ChunkSize < ui32Log2PageSize)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Requested physical addresses from PMR "
return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
}
- uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
- if (uiNumAllocs > 1)
- {
- PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
- uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+ PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0);
+ PVR_ASSERT(ui32Log2PageSize >= RA_BASE_FLAGS_LOG2);
+ if (psLMAllocArrayData->bPhysContig)
+ {
for (idx=0; idx < ui32NumOfPages; idx++)
{
if (pbValid[idx])
{
- uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
- uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
-
- PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs,
- "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
-
- PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
-
- psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->aBaseArray[0] + puiOffset[idx];
}
}
}
else
{
+ uiLog2AllocSize = psLMAllocArrayData->uiLog2ChunkSize;
+
for (idx=0; idx < ui32NumOfPages; idx++)
{
if (pbValid[idx])
{
- psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+ uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+ uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+ PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs,
+ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
+
+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+ /* The base may or may not be a ghost base, but we don't care,
+ * we just need the real representation of the base.
+ */
+ psDevPAddr[idx].uiAddr = RA_BASE_STRIP_GHOST_BIT(
+ psLMAllocArrayData->aBaseArray[uiAllocIndex]) + uiInAllocOffset;
}
}
}
PMR_FLAGS_T ulFlags)
{
PVRSRV_ERROR eError;
- PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
- void *pvKernLinAddr = NULL;
- IMG_UINT32 ui32PageIndex = 0;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+ PMR_KERNEL_MAPPING *psKernelMapping;
+ RA_BASE_T *paBaseArray;
+ IMG_UINT32 ui32ChunkIndex = 0;
size_t uiOffsetMask = uiOffset;
- psLMAllocArrayData = pvPriv;
+ IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize;
+ IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize;
+ IMG_UINT64 uiPhysSize;
- /* Check that we can map this in contiguously */
- if (psLMAllocArrayData->uiTotalNumPages != 1)
+ PVR_ASSERT(psLMAllocArrayData);
+ PVR_ASSERT(ppvKernelAddressOut);
+ PVR_ASSERT(phHandleOut);
+
+ if (psLMAllocArrayData->bIsSparse)
{
- size_t uiStart = uiOffset;
- size_t uiEnd = uiOffset + uiSize - 1;
- size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+ IMG_UINT32 i;
+ /* Locate the desired physical chunk to map in */
+ ui32ChunkIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize;
- /* We can still map if only one page is required */
- if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+ if (OSIsMapPhysNonContigSupported())
{
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, e0);
+ /* If a size hasn't been supplied assume we are mapping a single page */
+ IMG_UINT32 uiNumChunksToMap;
+
+ /* This is to support OSMapPMR originated parameters */
+ if (uiOffset == 0 && uiSize == 0)
+ {
+ uiNumChunksToMap = psLMAllocArrayData->iNumChunksAllocated;
+ }
+ else
+ {
+ uiNumChunksToMap = uiSize >> psLMAllocArrayData->uiLog2ChunkSize;
+ }
+
+ /* Check we are attempting to map at least a chunk in size */
+ if (uiNumChunksToMap < 1)
+ {
+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS, "uiNumChunksToMap < 1");
+ }
+
+ /* Check contiguous region doesn't exceed size of PMR */
+ if (ui32ChunkIndex + (uiNumChunksToMap - 1) > psLMAllocArrayData->uiTotalNumChunks)
+ {
+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_INVALID_PARAMS,
+ "Mapping range exceeds total num chunks in PMR");
+ }
+
+ /* Check the virtually contiguous region given is physically backed */
+ for (i = ui32ChunkIndex; i < ui32ChunkIndex + uiNumChunksToMap; i++)
+ {
+ if (RA_BASE_IS_INVALID(psLMAllocArrayData->aBaseArray[i]))
+ {
+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check");
+ }
+ }
+ /* Size of virtually contiguous sparse alloc */
+ uiPhysSize = (IMG_UINT64) uiNumChunksToMap << psLMAllocArrayData->uiLog2ChunkSize;
}
+ else
+ {
+ size_t uiStart = uiOffset;
+ size_t uiEnd = uiOffset + uiSize - 1;
+ size_t uiChunkMask = ~((1 << psLMAllocArrayData->uiLog2ChunkSize) - 1);
- /* Locate the desired physical page to map in */
- ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
- uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
- }
+ /* We can still map if only one chunk is required */
+ if ((uiStart & uiChunkMask) != (uiEnd & uiChunkMask))
+ {
+ PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, "Sparse contiguity check");
+ }
+ /* Map a single chunk */
+ uiPhysSize = uiChunkSize;
+ }
- PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+ paBaseArray = &psLMAllocArrayData->aBaseArray[ui32ChunkIndex];
- eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
- &psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
- psLMAllocArrayData->uiContigAllocSize,
- ulFlags,
- &pvKernLinAddr);
+ /* Offset mask to be used for address offsets within a chunk */
+ uiOffsetMask = (1U << psLMAllocArrayData->uiLog2ChunkSize) - 1;
+ }
+ else
+ {
+ paBaseArray = psLMAllocArrayData->aBaseArray;
+ uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize;
+ }
- *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
- *phHandleOut = pvKernLinAddr;
+ PVR_ASSERT(ui32ChunkIndex < psLMAllocArrayData->uiTotalNumChunks);
- return eError;
+ psKernelMapping = OSAllocMem(sizeof(*psKernelMapping));
+ PVR_RETURN_IF_NOMEM(psKernelMapping);
+
+ eError = _MapPMRKernel(psLMAllocArrayData,
+ paBaseArray,
+ uiPhysSize,
+ ulFlags,
+ psKernelMapping);
+ if (eError == PVRSRV_OK)
+ {
+ /* uiOffset & uiOffsetMask is used to get the kernel addr within the page */
+ *ppvKernelAddressOut = ((IMG_CHAR *) psKernelMapping->pvKernelAddress) + (uiOffset & uiOffsetMask);
+ *phHandleOut = psKernelMapping;
+ }
+ else
+ {
+ OSFreeMem(psKernelMapping);
+ PVR_LOG_ERROR(eError, "_MapPMRKernel");
+ }
- /*
- error exit paths follow:
- */
-e0:
- PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
- IMG_HANDLE hHandle)
+ IMG_HANDLE hHandle)
{
- PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
- void *pvKernLinAddr = NULL;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+ PMR_KERNEL_MAPPING *psKernelMapping = (PMR_KERNEL_MAPPING *) hHandle;
- psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
- pvKernLinAddr = (void *) hHandle;
+ PVR_ASSERT(psLMAllocArrayData);
+ PVR_ASSERT(psKernelMapping);
- _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
- pvKernLinAddr);
-}
+ _UnMapPMRKernel(psLMAllocArrayData,
+ psKernelMapping);
+ OSFreeMem(psKernelMapping);
+}
static PVRSRV_ERROR
CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
IMG_UINT8 *pcPMR,
size_t uiSize))
{
- PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
size_t uiBytesCopied;
size_t uiBytesToCopy;
size_t uiBytesCopyableFromAlloc;
- void *pvMapping = NULL;
+ PMR_KERNEL_MAPPING sMapping;
IMG_UINT8 *pcKernelPointer = NULL;
size_t uiBufferOffset;
IMG_UINT64 uiAllocIndex;
IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+ IMG_UINT32 uiLog2ChunkSize = psLMAllocArrayData->uiLog2ChunkSize;
+ IMG_UINT64 uiChunkSize = 1ULL << uiLog2ChunkSize;
+ IMG_UINT64 uiPhysSize;
PVRSRV_ERROR eError;
- psLMAllocArrayData = pvPriv;
-
uiBytesCopied = 0;
uiBytesToCopy = uiBufSz;
uiBufferOffset = 0;
- if (psLMAllocArrayData->uiTotalNumPages > 1)
+ if (psLMAllocArrayData->bIsSparse)
{
while (uiBytesToCopy > 0)
{
/* we have to map one alloc in at a time */
- PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
- uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
- uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+ PVR_ASSERT(psLMAllocArrayData->uiLog2ChunkSize != 0);
+ uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2ChunkSize;
+ uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2ChunkSize);
uiBytesCopyableFromAlloc = uiBytesToCopy;
- if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+ if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2ChunkSize))
{
- uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+ uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2ChunkSize)-uiInAllocOffset);
}
+ /* Mapping a single chunk at a time */
+ uiPhysSize = uiChunkSize;
PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
- PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
- PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
-
- eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
- &psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
- psLMAllocArrayData->uiContigAllocSize,
- PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
- &pvMapping);
+ PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumChunks);
+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2ChunkSize));
+
+ eError = _MapPMRKernel(psLMAllocArrayData,
+ &psLMAllocArrayData->aBaseArray[uiAllocIndex],
+ uiPhysSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+ &sMapping);
PVR_GOTO_IF_ERROR(eError, e0);
- pcKernelPointer = pvMapping;
+ pcKernelPointer = sMapping.pvKernelAddress;
pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
- _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
- pvMapping);
+ _UnMapPMRKernel(psLMAllocArrayData,
+ &sMapping);
uiBufferOffset += uiBytesCopyableFromAlloc;
uiBytesToCopy -= uiBytesCopyableFromAlloc;
}
else
{
- PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize);
- PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0);
- eError = _MapAlloc(psLMAllocArrayData->psPhysHeap,
- &psLMAllocArrayData->pasDevPAddr[0],
- psLMAllocArrayData->uiContigAllocSize,
- PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
- &pvMapping);
- PVR_GOTO_IF_ERROR(eError, e0);
- pcKernelPointer = pvMapping;
- pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
-
- _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize,
- pvMapping);
-
- uiBytesCopied = uiBufSz;
+ uiPhysSize = (IMG_UINT64) psLMAllocArrayData->uiTotalNumChunks << uiLog2ChunkSize;
+ PVR_ASSERT((uiOffset + uiBufSz) <= uiPhysSize);
+ PVR_ASSERT(uiChunkSize != 0);
+ eError = _MapPMRKernel(psLMAllocArrayData,
+ psLMAllocArrayData->aBaseArray,
+ uiPhysSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC,
+ &sMapping);
+ PVR_GOTO_IF_ERROR(eError, e0);
+ pcKernelPointer = sMapping.pvKernelAddress;
+ pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+ _UnMapPMRKernel(psLMAllocArrayData,
+ &sMapping);
+
+ uiBytesCopied = uiBufSz;
}
*puiNumBytes = uiBytesCopied;
return PVRSRV_OK;
IMG_UINT32 uiFreepgidx;
PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
- IMG_DEV_PHYADDR sPhyAddr;
+ IMG_UINT32 uiLog2ChunkSize = psPMRPageArrayData->uiLog2ChunkSize;
+ IMG_UINT32 uiChunkSize = 1ULL << uiLog2ChunkSize;
#if defined(DEBUG)
IMG_BOOL bPoisonFail = IMG_FALSE;
#endif
/* Fetch the Page table array represented by the PMR */
- IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+ RA_BASE_T *paBaseArray = psPMRPageArrayData->aBaseArray;
PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR);
/* The incoming request is classified into two operations independent of
- * each other: alloc & free pages.
+ * each other: alloc & free chunks.
* These operations can be combined with two mapping operations as well
* which are GPU & CPU space mappings.
*
- * From the alloc and free page requests, the net amount of pages to be
- * allocated or freed is computed. Pages that were requested to be freed
+ * From the alloc and free chunk requests, the net amount of chunks to be
+ * allocated or freed is computed. Chunks that were requested to be freed
* will be reused to fulfil alloc requests.
*
* The order of operations is:
- * 1. Allocate new pages from the OS
- * 2. Move the free pages from free request to alloc positions.
- * 3. Free the rest of the pages not used for alloc
+ * 1. Allocate new Chunks.
+ * 2. Move the free chunks from free request to alloc positions.
+ * 3. Free the rest of the chunks not used for alloc
*
* Alloc parameters are validated at the time of allocation
* and any error will be handled then. */
ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
ui32FreePageCount : ui32AllocPageCount;
- PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+ PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free chunks not supported");
}
if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
/* Validate the free page indices */
if (ui32FreePageCount)
{
- if (NULL != pai32FreeIndices)
+ if (pai32FreeIndices != NULL)
{
for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
{
uiFreepgidx = pai32FreeIndices[ui32Loop];
- if (uiFreepgidx > psPMRPageArrayData->uiTotalNumPages)
+ if (uiFreepgidx > psPMRPageArrayData->uiTotalNumChunks)
{
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
}
- if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
+ if (RA_BASE_IS_INVALID(paBaseArray[uiFreepgidx]))
{
- PVR_LOG_GOTO_WITH_ERROR("psPageArray[uiFreepgidx].uiAddr", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
+ PVR_LOG_GOTO_WITH_ERROR("paBaseArray[uiFreepgidx]", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
}
}
- }else
+ }
+ else
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Given non-zero free count but missing indices array",
}
}
- /*The following block of code verifies any issues with common alloc page indices */
+ /* The following block of code verifies any issues with common alloc chunk indices */
for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
{
uiAllocpgidx = pai32AllocIndices[ui32Loop];
- if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumPages)
+ if (uiAllocpgidx > psPMRPageArrayData->uiTotalNumChunks)
{
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
}
if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
{
- if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
- (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ if ((!RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) ||
+ (psPMRMapTable->aui32Translation[uiAllocpgidx] != TRANSLATION_INVALID))
{
PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
}
}
else
{
- if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) ||
- (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ if ((RA_BASE_IS_INVALID(paBaseArray[uiAllocpgidx])) ||
+ (psPMRMapTable->aui32Translation[uiAllocpgidx] == TRANSLATION_INVALID))
{
PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
}
}
}
-
ui32Loop = 0;
- /* Allocate new pages */
+ /* Allocate new chunks */
if (0 != ui32AdtnlAllocPages)
{
- /* Say how many pages to allocate */
- psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+ /* Say how many chunks to allocate */
+ psPMRPageArrayData->uiChunksToAlloc = ui32AdtnlAllocPages;
eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0);
- /* Mark the corresponding pages of translation table as valid */
+ /* Mark the corresponding chunks of translation table as valid */
for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
{
psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
}
ui32Index = ui32Loop;
+ ui32Loop = 0;
+
+ /* Move the corresponding free chunks to alloc request */
+ eError = RA_SwapSparseMem(psPMRPageArrayData->psArena,
+ paBaseArray,
+ psPMRPageArrayData->uiTotalNumChunks,
+ psPMRPageArrayData->uiLog2ChunkSize,
+ &pai32AllocIndices[ui32Index],
+ &pai32FreeIndices[ui32Loop],
+ ui32CommonRequstCount);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RA_SwapSparseMem", unwind_alloc);
- /* Move the corresponding free pages to alloc request */
for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
{
-
uiAllocpgidx = pai32AllocIndices[ui32Index];
uiFreepgidx = pai32FreeIndices[ui32Loop];
- sPhyAddr = psPageArray[uiAllocpgidx];
- psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
/* Is remap mem used in real world scenario? Should it be turned to a
* debug feature? The condition check needs to be out of loop, will be
* done at later point though after some analysis */
- if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ if ((uiFlags & SPARSE_REMAP_MEM) != SPARSE_REMAP_MEM)
{
psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
- psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR;
}
else
{
- psPageArray[uiFreepgidx] = sPhyAddr;
psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
}
* such as zeroing, poisoning etc. */
if (psPMRPageArrayData->bPoisonOnAlloc)
{
- eError = _PoisonAlloc(psPMRPageArrayData->psPhysHeap,
- &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
- psPMRPageArrayData->uiContigAllocSize,
- PVRSRV_POISON_ON_ALLOC_VALUE);
+ eError = _PhysPgMemSet(psPMRPageArrayData,
+ &psPMRPageArrayData->aBaseArray[uiAllocpgidx],
+ uiChunkSize,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
/* Consider this as a soft failure and go ahead but log error to kernel log */
if (eError != PVRSRV_OK)
#endif
}
}
- else
+
+ if (psPMRPageArrayData->bZeroOnAlloc)
{
- if (psPMRPageArrayData->bZeroOnAlloc)
+ eError = _PhysPgMemSet(psPMRPageArrayData,
+ &psPMRPageArrayData->aBaseArray[uiAllocpgidx],
+ uiChunkSize,
+ ZERO_PAGE_VALUE);
+ /* Consider this as a soft failure and go ahead but log error to kernel log */
+ if (eError != PVRSRV_OK)
{
- eError = _ZeroAlloc(psPMRPageArrayData->psPhysHeap,
- &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
- psPMRPageArrayData->uiContigAllocSize);
- /* Consider this as a soft failure and go ahead but log error to kernel log */
- if (eError != PVRSRV_OK)
- {
#if defined(DEBUG)
- /*Don't think we need to zero any pages further*/
- bZeroFail = IMG_TRUE;
+ /* Don't think we need to zero any chunks further */
+ bZeroFail = IMG_TRUE;
#endif
- }
}
}
}
- /*Free the additional free pages */
+ /* Free the additional free chunks */
if (0 != ui32AdtnlFreePages)
{
ui32Index = ui32Loop;
- _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+ eError = _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_FreeLMPages", e0);
+
ui32Loop = 0;
while (ui32Loop++ < ui32AdtnlFreePages)
{
- /*Set the corresponding mapping table entry to invalid address */
+ /* Set the corresponding mapping table entry to invalid address */
psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
}
psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
}
-
}
#if defined(DEBUG)
if (IMG_TRUE == bPoisonFail)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__));
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the chunk", __func__));
}
if (IMG_TRUE == bZeroFail)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__));
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the chunk", __func__));
}
#endif
- /* Update the PMR memory holding information */
- eError = PVRSRV_OK;
+ return PVRSRV_OK;
+
+unwind_alloc:
+ _FreeLMPages(psPMRPageArrayData, pai32AllocIndices, ui32Index);
+
+ for (ui32Loop = 0; ui32Loop < ui32Index; ui32Loop++)
+ {
+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = TRANSLATION_INVALID;
+ }
e0:
return eError;
uintptr_t sCpuVABase = sCpuVAddrBase;
IMG_CPU_PHYADDR sCpuAddrPtr;
IMG_BOOL bValid = IMG_FALSE;
+ IMG_UINT32 i;
- /*Get the base address of the heap */
+ /* Get the base address of the heap */
eError = PMR_CpuPhysAddr(psPMR,
- psPMRPageArrayData->uiLog2AllocSize,
+ psPMRPageArrayData->uiLog2ChunkSize,
1,
0, /* offset zero here mean first page in the PMR */
&sCpuAddrPtr,
/* Phys address of heap is computed here by subtracting the offset of this page
* basically phys address of any page = Base address of heap + offset of the page */
- sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
- psPageArray = psPMRPageArrayData->pasDevPAddr;
-
- return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
- sCpuVABase,
- sCpuAddrPtr,
- ui32AllocPageCount,
- pai32AllocIndices,
- ui32FreePageCount,
- pai32FreeIndices,
- IMG_TRUE);
+ sCpuAddrPtr.uiAddr -= RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->aBaseArray[0]);
+
+ /* We still have ghost bits in the base array, this interface expects true page
+ * addresses so we need to pre mask / translate the base array
+ */
+ psPageArray = OSAllocMem(sizeof(IMG_DEV_PHYADDR)*
+ psPMRPageArrayData->uiTotalNumChunks);
+ PVR_LOG_RETURN_IF_NOMEM(psPageArray, "Page translation array");
+
+ for (i = 0; i < psPMRPageArrayData->uiTotalNumChunks; i++)
+ {
+ psPageArray[i].uiAddr = RA_BASE_STRIP_GHOST_BIT(psPMRPageArrayData->aBaseArray[i]);
+ }
+
+ eError = OSChangeSparseMemCPUAddrMap((void**) psPageArray,
+ sCpuVABase,
+ sCpuAddrPtr,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ IMG_TRUE);
+
+ OSFreeMem(psPageArray);
+
+ return eError;
}
static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
- /* pfnLockPhysAddresses */
- &PMRLockSysPhysAddressesLocalMem,
- /* pfnUnlockPhysAddresses */
- &PMRUnlockSysPhysAddressesLocalMem,
- /* pfnDevPhysAddr */
- &PMRSysPhysAddrLocalMem,
- /* pfnAcquireKernelMappingData */
- &PMRAcquireKernelMappingDataLocalMem,
- /* pfnReleaseKernelMappingData */
- &PMRReleaseKernelMappingDataLocalMem,
- /* pfnReadBytes */
- &PMRReadBytesLocalMem,
- /* pfnWriteBytes */
- &PMRWriteBytesLocalMem,
- /* pfnUnpinMem */
- NULL,
- /* pfnPinMem */
- NULL,
- /* pfnChangeSparseMem*/
- &PMRChangeSparseMemLocalMem,
- /* pfnChangeSparseMemCPUMap */
- &PMRChangeSparseMemCPUMapLocalMem,
- /* pfnMMap */
- NULL,
- /* pfnFinalize */
- &PMRFinalizeLocalMem
+ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesLocalMem,
+ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesLocalMem,
+ .pfnDevPhysAddr = &PMRSysPhysAddrLocalMem,
+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataLocalMem,
+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataLocalMem,
+ .pfnReadBytes = &PMRReadBytesLocalMem,
+ .pfnWriteBytes = &PMRWriteBytesLocalMem,
+ .pfnChangeSparseMem = &PMRChangeSparseMemLocalMem,
+ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapLocalMem,
+ .pfnMMap = NULL,
+ .pfnFinalize = &PMRFinalizeLocalMem
};
PVRSRV_ERROR
PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap,
- CONNECTION_DATA *psConnection,
+ CONNECTION_DATA *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
IMG_BOOL bPoisonOnAlloc;
IMG_BOOL bPoisonOnFree;
IMG_BOOL bOnDemand;
- IMG_BOOL bContig;
+ IMG_BOOL bIsSparse;
- /* For sparse requests we have to do the allocation
- * in chunks rather than requesting one contiguous block */
+ /* This path is checking for the type of PMR to create, if sparse we
+ * have to perform additional validation as we can only map sparse ranges
+ * if the os functionality to do so is present. We can also only map virtually
+ * contiguous sparse regions. Non backed gaps in a range cannot be mapped.
+ */
if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1)
{
- if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags))
+ if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) &&
+ !OSIsMapPhysNonContigSupported())
{
PVR_DPF((PVR_DBG_ERROR,
- "%s: LMA kernel mapping functions currently "
- "don't work with discontiguous memory.",
+ "%s: LMA kernel mapping functions not available "
+ "for physically discontiguous memory.",
__func__));
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam);
}
- bContig = IMG_FALSE;
+ bIsSparse = IMG_TRUE;
}
else
{
- bContig = IMG_TRUE;
+ bIsSparse = IMG_FALSE;
}
bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
#endif
/* Create Array structure that holds the physical pages */
- eError = _AllocLMPageArray(uiChunkSize * ui32NumVirtChunks,
+ eError = _AllocLMPageArray(uiSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
- pui32MappingTable,
uiLog2AllocPageSize,
bZero,
bPoisonOnAlloc,
bPoisonOnFree,
- bContig,
+ bIsSparse,
bOnDemand,
psPhysHeap,
uiFlags,
eError = PMRCreatePMR(psPhysHeap,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
return PVRSRV_OK;
errorOnCreate:
- if (!bOnDemand && psPrivData->iNumPagesAllocated)
+ if (!bOnDemand && psPrivData->iNumChunksAllocated)
{
eError2 = _FreeLMPages(psPrivData, NULL, 0);
PVR_ASSERT(eError2 == PVRSRV_OK);
}
errorOnAllocPages:
- eError2 = _FreeLMPageArray(psPrivData);
- PVR_ASSERT(eError2 == PVRSRV_OK);
+ _FreeLMPageArray(psPrivData);
errorOnAllocPageArray:
errorOnParam:
*/
IMG_BOOL bSparseAlloc;
- /* Indicates whether this PMR has been unpinned.
- * By default, all PMRs are pinned at creation.
- */
- IMG_BOOL bIsUnpinned;
-
/*
* Flag that conveys mutability of the PMR:
* - TRUE indicates the PMR is immutable (no more memory changes)
static void
PDumpPMRMallocPMR(PMR *psPMR,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_ALIGN_T uiBlockSize,
IMG_UINT32 ui32ChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *puiMappingTable,
IMG_UINT32 uiLog2Contiguity,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phPDumpAllocInfoOut,
IMG_UINT32 ui32PDumpFlags);
IMG_UINT32 ui32FreePageCount,
IMG_UINT32 *pai32FreeIndices,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phPDumpAllocInfoOut);
#endif /* defined PDUMP */
static PVRSRV_ERROR
_PMRCreate(PMR_SIZE_T uiLogicalSize,
- PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PMR_MAPPING_TABLE *psMappingTable;
struct _PMR_CTX_ *psContext;
IMG_UINT32 i, ui32Temp = 0;
- IMG_UINT32 ui32Remainder;
PVRSRV_ERROR eError;
IMG_BOOL bSparse = IMG_FALSE;
+ PMR_SIZE_T uiChunkSize;
psContext = &_gsSingletonPMRContext;
(ui32NumVirtChunks > 1) )
{
bSparse = IMG_TRUE;
+ uiChunkSize = 1ULL << uiLog2ContiguityGuarantee;
+ }
+ else
+ {
+ uiChunkSize = uiLogicalSize;
}
/* Extra checks required for sparse PMRs */
- if (uiLogicalSize != uiChunkSize)
+ if (bSparse)
{
/* Check the logical size and chunk information agree with each other */
if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
__func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
}
-
- /* Check that the chunk size is a multiple of the contiguity */
- OSDivide64(uiChunkSize, (1<< uiLog2ContiguityGuarantee), &ui32Remainder);
- if (ui32Remainder)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Bad chunk size, must be a multiple of the contiguity "
- "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
- __func__,
- (unsigned long long) uiChunkSize,
- uiLog2ContiguityGuarantee));
- return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
- }
}
pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
psPMR->uiFlags = uiFlags;
psPMR->psMappingTable = psMappingTable;
psPMR->bSparseAlloc = bSparse;
- psPMR->bIsUnpinned = IMG_FALSE;
psPMR->bNoLayoutChange = IMG_FALSE;
psPMR->szAnnotation[0] = '\0';
return PVRSRV_OK;
}
-/* This function returns true if the PMR is in use and false otherwise.
- * This function is not thread safe and hence the caller
- * needs to ensure the thread safety by explicitly taking
- * the lock on the PMR or through other means */
-IMG_BOOL PMRIsPMRLive(PMR *psPMR)
-{
- return (OSAtomicRead(&psPMR->iRefCount) > 0);
-}
-
static IMG_UINT32
_Ref(PMR *psPMR)
{
- PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0);
+ if (OSAtomicRead(&psPMR->iRefCount) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: Ref Count == 0 PMR: @0x%p Annot: %s",
+ psPMR,
+ psPMR->szAnnotation));
+ OSWarnOn(1);
+ }
return OSAtomicIncrement(&psPMR->iRefCount);
}
static IMG_UINT32
_Unref(PMR *psPMR)
{
- PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0);
+ if (OSAtomicRead(&psPMR->iRefCount) <= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "pmr.c: Unref Count <= 0 PMR: @0x%p Annot: %s RefCount: %d",
+ psPMR,
+ psPMR->szAnnotation,
+ (IMG_INT32) OSAtomicRead(&psPMR->iRefCount)));
+ OSWarnOn(1);
+ }
return OSAtomicDecrement(&psPMR->iRefCount);
}
static void
_UnrefAndMaybeDestroy(PMR *psPMR)
{
- PVRSRV_ERROR eError2;
struct _PMR_CTX_ *psCtx;
IMG_INT iRefCount;
{
if (psPMR->psFuncTab->pfnFinalize != NULL)
{
- eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
-
- /* PMR unref can be called asynchronously by the kernel or other
- * third party modules (eg. display) which doesn't go through the
- * usual services bridge. The same PMR can be referenced simultaneously
- * in a different path that results in a race condition.
- * Hence depending on the race condition, a factory may refuse to destroy
- * the resource associated with this PMR if a reference on it was taken
- * prior to unref. In that case the PMR factory function returns the error.
- *
- * When such an error is encountered, the factory needs to ensure the state
- * associated with PMR is undisturbed. At this point we just bail out from
- * freeing the PMR itself. The PMR handle will then be freed at a later point
- * when the same PMR is unreferenced.
- * */
- if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2)
- {
- if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
- {
- psPMR->psFuncTab->pfnReleasePMRFactoryLock();
- }
- return;
- }
- PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+ psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
}
+
#if defined(PDUMP)
/* if allocation is done on the host node don't include it in the PDUMP */
if (!_IsHostDevicePMR(psPMR))
}
}
-static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+static INLINE IMG_BOOL _PMRIsSparse(const PMR *psPMR)
{
return psPMR->bSparseAlloc;
}
PVRSRV_ERROR
PMRCreatePMR(PHYS_HEAP *psPhysHeap,
PMR_SIZE_T uiLogicalSize,
- PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation");
eError = _PMRCreate(uiLogicalSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
{
PMR_FLAGS_T uiFlags = psPMR->uiFlags;
IMG_BOOL bInitialise = IMG_FALSE;
- IMG_UINT32 ui32InitValue = 0;
+ IMG_UINT8 ui8InitValue = 0;
if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
{
}
else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
{
- ui32InitValue = 0xDEADBEEF;
+ ui8InitValue = (IMG_UINT8)PVRSRV_POISON_ON_ALLOC_VALUE;
bInitialise = IMG_TRUE;
}
PDumpPMRMallocPMR(psPMR,
- (uiChunkSize * ui32NumVirtChunks),
+ uiLogicalSize,
1ULL<<uiLog2ContiguityGuarantee,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
uiLog2ContiguityGuarantee,
bInitialise,
- ui32InitValue,
+ ui8InitValue,
&psPMR->hPDumpAllocHandle,
ui32PDumpFlags);
}
return PVRSRV_OK;
}
-PVRSRV_ERROR
-PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- PVR_ASSERT(psPMR != NULL);
-
- OSLockAcquire(psPMR->hLock);
- /* Stop if we still have references on the PMR */
- if ( ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2))
- || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) )
- {
- OSLockRelease(psPMR->hLock);
- PVR_DPF((PVR_DBG_ERROR,
- "%s: PMR is still referenced %u times. "
- "That means this PMR is probably exported or used somewhere else. "
- "Allowed are 2 references if it is mapped to device, otherwise 1.",
- __func__,
- OSAtomicRead(&psPMR->iRefCount)));
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, e_exit);
- }
- OSLockRelease(psPMR->hLock);
-
- if (psPMR->psFuncTab->pfnUnpinMem != NULL)
- {
- eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
- if (eError == PVRSRV_OK)
- {
- psPMR->bIsUnpinned = IMG_TRUE;
- }
- }
-
-e_exit:
- return eError;
-}
-
-PVRSRV_ERROR
-PMRPinPMR(PMR *psPMR)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- PVR_ASSERT(psPMR != NULL);
-
- if (psPMR->psFuncTab->pfnPinMem != NULL)
- {
- eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
- psPMR->psMappingTable);
- if (eError == PVRSRV_OK)
- {
- psPMR->bIsUnpinned = IMG_FALSE;
- }
- }
-
- return eError;
-}
-
PVRSRV_ERROR
PMRMakeLocalImportHandle(PMR *psPMR,
PMR **ppsPMR)
}
PVRSRV_ERROR
-PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData, PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
+ /* if writeable mapping is requested on non-writeable PMR then fail */
+ PVR_RETURN_IF_FALSE(PVRSRV_CHECK_CPU_WRITEABLE(psPMR->uiFlags) ||
+ !PVRSRV_CHECK_CPU_WRITEABLE(uiFlags),
+ PVRSRV_ERROR_PMR_NOT_PERMITTED);
+
if (psPMR->psFuncTab->pfnMMap)
{
return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
{
PVR_ASSERT(psPMR != NULL);
_Ref(psPMR);
+
+ /* Lock phys addresses if PMR backing was allocated immediately */
+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(psPMR->uiFlags))
+ {
+ PMRLockSysPhysAddresses(psPMR);
+ }
}
PVRSRV_ERROR
PMRUnrefPMR(PMR *psPMR)
{
+ /* Unlock phys addresses if PMR backing was allocated immediately */
+ if (PVRSRV_CHECK_PHYS_ALLOC_NOW(psPMR->uiFlags))
+ {
+ PMRUnlockSysPhysAddresses(psPMR);
+ }
+
_UnrefAndMaybeDestroy(psPMR);
return PVRSRV_OK;
}
+void
+PMRRefPMR2(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ _Ref(psPMR);
+}
+
+void
+PMRUnrefPMR2(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ _UnrefAndMaybeDestroy(psPMR);
+}
+
PVRSRV_ERROR
PMRUnrefUnlockPMR(PMR *psPMR)
{
PMRUnlockSysPhysAddresses(psPMR);
- PMRUnrefPMR(psPMR);
+ _UnrefAndMaybeDestroy(psPMR);
return PVRSRV_OK;
}
return _PMRIsSparse(psPMR);
}
-IMG_BOOL
-PMR_IsUnpinned(const PMR *psPMR)
-{
- PVR_ASSERT(psPMR != NULL);
-
- return psPMR->bIsUnpinned;
-}
-
/* Function that alters the mutability property
* of the PMR
* Setting it to TRUE makes sure the PMR memory layout
PVR_ASSERT(psPMR != NULL);
/* iLockCount will be > 0 for any backed PMR (backed on demand or not) */
- if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned)
+ if (OSAtomicRead(&psPMR->iLockCount) > 0)
{
if (psPMR->bSparseAlloc)
{
return psPMR->uiLog2ContiguityGuarantee;
}
+IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return (PMR_MAX_SUPPORTED_SIZE >> psPMR->uiLog2ContiguityGuarantee);
+}
+
const IMG_CHAR *
PMR_GetAnnotation(const PMR *psPMR)
{
puiPhysicalOffset,
&ui32Remain,
pbValid);
- if (*pbValid || _PMRIsSparse(psPMR))
- {
- /* Sparse PMR may not always have the first page valid */
- eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
- ui32Log2PageSize,
- ui32NumOfPages,
- puiPhysicalOffset,
- pbValid,
- psDevAddrPtr);
- PVR_GOTO_IF_ERROR(eError, FreeOffsetArray);
+
+ /* Sparse PMR may not always have the first page valid */
+ eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ puiPhysicalOffset,
+ pbValid,
+ psDevAddrPtr);
+ PVR_GOTO_IF_ERROR(eError, FreeOffsetArray);
#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
- /* Currently excluded from the default build because of performance
- * concerns.
- * We do not need this part in all systems because the GPU has the same
- * address view of system RAM as the CPU.
- * Alternatively this could be implemented as part of the PMR-factories
- * directly */
- if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
- PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+ /* Currently excluded from the default build because of performance
+ * concerns.
+ * We do not need this part in all systems because the GPU has the same
+ * address view of system RAM as the CPU.
+ * Alternatively this could be implemented as part of the PMR-factories
+ * directly */
+ if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+ PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+ {
+ IMG_UINT32 i;
+ IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+ /* Copy the translated addresses to the correct array */
+ for (i = 0; i < ui32NumOfPages; i++)
{
- IMG_UINT32 i;
- IMG_DEV_PHYADDR sDevPAddrCorrected;
-
- /* Copy the translated addresses to the correct array */
- for (i = 0; i < ui32NumOfPages; i++)
- {
- PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
- 1,
- &sDevPAddrCorrected,
- (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
- psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
- }
+ PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+ 1,
+ &sDevPAddrCorrected,
+ (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
+ psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
}
-#endif
}
+#endif
+
FreeOffsetArray:
if (puiPhysicalOffset != auiPhysicalOffset)
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
{
- PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
- OSGetCurrentClientProcessIDKM());
+ PVRSRVStatsUpdateOOMStat(NULL,
+ PMR_DeviceNode(psPMR),
+ PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT,
+ OSGetCurrentClientProcessIDKM());
}
#endif
goto e0;
#if defined(PDUMP)
{
IMG_BOOL bInitialise = IMG_FALSE;
- IMG_UINT32 ui32InitValue = 0;
+ IMG_UINT8 ui8InitValue = 0;
if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR)))
{
}
else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR)))
{
- ui32InitValue = 0xDEADBEEF;
+ ui8InitValue = (IMG_UINT8)PVRSRV_POISON_ON_ALLOC_VALUE;
bInitialise = IMG_TRUE;
}
ui32FreePageCount,
pai32FreeIndices,
bInitialise,
- ui32InitValue,
+ ui8InitValue,
&psPMR->hPDumpAllocHandle);
}
OSSNPrintf(pszSymbolicAddr,
ui32SymbolicAddrLen,
PMR_SYMBOLICADDR_FMTSPEC,
- PMR_DEFAULT_PREFIX,
+ PMR_IsSparse(psPMR) ? PMR_SPARSE_PREFIX : PMR_DEFAULT_PREFIX,
psPMR->uiSerialNum,
uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
psPMR->szAnnotation);
return PVRSRV_OK;
}
+	/* Initialise the filename to an empty (zero-length) string in case no name is produced below */
+ aszParamStreamFilename[0] = '\0';
+
PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
/* Check if pdump client is connected */
if (bValid)
{
size_t uiNumBytes;
+ IMG_BOOL bOk2Write = IMG_TRUE;
if (bZero)
{
&aszParamStreamFilename[0],
sizeof(aszParamStreamFilename),
&uiParamStreamFileOffset);
+
if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
{
/* Write to parameter file prevented under the flags and
* current state of the driver so skip further writes.
*/
eError = PVRSRV_OK;
+ bOk2Write = IMG_FALSE; /* Do *NOT* write anything */
}
else if (eError != PVRSRV_OK)
{
PDUMP_ERROR(psDevNode,
eError, "Failed to write PMR memory to parameter file");
+ bOk2Write = IMG_FALSE; /* Do *NOT* write anything */
}
}
- /* Emit the LDB command to the current symbolic address */
- eError = PDumpPMRLDB(psDevNode,
- aszMemspaceName,
- aszSymbolicName,
- uiOutOffset,
- uiNumBytes,
- pszParamStreamFileName,
- uiParamStreamFileOffset,
- uiPDumpFlags);
+ if (bOk2Write)
+ {
+ /* Emit the LDB command to the current symbolic address */
+ eError = PDumpPMRLDB(psDevNode,
+ aszMemspaceName,
+ aszSymbolicName,
+ uiOutOffset,
+ uiNumBytes,
+ pszParamStreamFileName,
+ uiParamStreamFileOffset,
+ uiPDumpFlags);
+ }
uiSizeRemain = uiSizeRemain - uiNumBytes;
}
uiCurrentOffset = uiNextSymName;
IMG_UINT32 ui32FreePageCount,
IMG_UINT32 *pai32FreeIndices,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phPDumpAllocInfoOut)
{
PVRSRV_ERROR eError;
uiBlockSize,
uiBlockSize,
bInitialise,
- ui32InitValue,
+ ui8InitValue,
&phPDumpAllocInfo[uiIndex],
PDUMP_NONE);
PVR_ASSERT(eError == PVRSRV_OK);
/* (IMG_HANDLE*) <- (IMG_HANDLE) */
IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+ PDUMP_LOCK(PDUMP_FLAGS_NONE);
+
for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
{
if (ahPDumpAllocHandleArray[i] != NULL)
{
- eError = PDumpFree(PMR_DeviceNode(psPMR),
- ahPDumpAllocHandleArray[i]);
+ eError = PDumpFreeUnlocked(PMR_DeviceNode(psPMR),
+ ahPDumpAllocHandleArray[i]);
PVR_ASSERT(eError == PVRSRV_OK);
ahPDumpAllocHandleArray[i] = NULL;
}
}
+ PDUMP_UNLOCK(PDUMP_FLAGS_NONE);
OSFreeMem(ahPDumpAllocHandleArray);
}
static void
PDumpPMRMallocPMR(PMR *psPMR,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_ALIGN_T uiBlockSize,
IMG_UINT32 ui32ChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *puiMappingTable,
IMG_UINT32 uiLog2Contiguity,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phPDumpAllocInfoOut,
IMG_UINT32 ui32PDumpFlags)
{
phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+ PDUMP_LOCK(ui32PDumpFlags);
for (i = 0; i < uiNumPhysBlocks; i++)
{
uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
eError = PMR_PDumpSymbolicAddr(psPMR,
- uiIndex * uiBlockSize,
+ uiIndex * ui32ChunkSize,
sizeof(aszMemspaceName),
&aszMemspaceName[0],
sizeof(aszSymbolicName),
&uiNextSymName);
PVR_ASSERT(eError == PVRSRV_OK);
- eError = PDumpMalloc(PMR_DeviceNode(psPMR),
- aszMemspaceName,
- aszSymbolicName,
- uiBlockSize,
- uiBlockSize,
- bInitialise,
- ui32InitValue,
- &phPDumpAllocInfo[uiIndex],
- ui32PDumpFlags);
+ eError = PDumpMallocUnlocked(PMR_DeviceNode(psPMR),
+ aszMemspaceName,
+ aszSymbolicName,
+ ui32ChunkSize,
+ ui32ChunkSize,
+ bInitialise,
+ ui8InitValue,
+ &phPDumpAllocInfo[uiIndex],
+ ui32PDumpFlags);
PVR_LOG_RETURN_VOID_IF_FALSE((eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE),
"PDumpPMRMalloc PDump capture bound to other device");
PVR_ASSERT(eError == PVRSRV_OK);
}
+ PDUMP_UNLOCK(ui32PDumpFlags);
+
/* (IMG_HANDLE) <- (IMG_HANDLE*) */
*phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
#include "lock.h"
#include "pvrsrv.h"
#include "pvr_debug.h"
-#include "process_stats.h"
+#include "htbserver.h"
+#include "di_server.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+typedef struct _EXTRA_POWER_STATS_
+{
+ IMG_UINT64 ui64PreClockSpeedChangeDuration;
+ IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration;
+ IMG_UINT64 ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+/* For the power timing stats we need 16 counters, one for each combination
+ * of forced/not-forced, power-on/power-off, pre-power/post-power and
+ * device/system statistics.
+ */
+#define NUM_POWER_STATS (16)
+#define NUM_EXTRA_POWER_STATS 10
+
+typedef struct PVRSRV_POWER_STATS_TAG
+{
+ EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+ IMG_UINT64 ui64PreClockSpeedChangeMark;
+ IMG_UINT64 ui64FirmwareIdleDuration;
+ IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
+ IMG_UINT32 ui32ClockSpeedIndexStart;
+ IMG_UINT32 ui32ClockSpeedIndexEnd;
+ IMG_UINT32 ui32FirmwareStartTimestamp;
+} PVRSRV_POWER_STATS;
+#endif
struct _PVRSRV_POWER_DEV_TAG_
{
IMG_HANDLE hDevCookie;
PVRSRV_DEV_POWER_STATE eDefaultPowerState;
ATOMIC_T eCurrentPowerState;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRV_POWER_STATS sPowerStats;
+#endif
};
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/*
+ * Power statistics related definitions
+ */
+
+/* For the mean time, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+ ((time) > 0 ? MEAN_TIME((time), (newtime)) : (newtime))
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+ DEVICE = 0,
+ SYSTEM = 1,
+ POST_POWER = 0,
+ PRE_POWER = 2,
+ POWER_OFF = 0,
+ POWER_ON = 4,
+ NOT_FORCED = 0,
+ FORCED = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+ ((forced) + (powon) + (prepow) + (system))
+
+void PVRSRVSetFirmwareStartTime(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT32 ui32Time)
+{
+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats;
+
+ psPowerStats->ui32FirmwareStartTimestamp =
+ UPDATE_TIME(psPowerStats->ui32FirmwareStartTimestamp,
+ ui32Time);
+}
+
+void PVRSRVSetFirmwareHandshakeIdleTime(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64Duration)
+{
+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats;
+
+ psPowerStats->ui64FirmwareIdleDuration =
+ UPDATE_TIME(psPowerStats->ui64FirmwareIdleDuration,
+ ui64Duration);
+}
+
+static void _InsertPowerTimeStatistic(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats;
+ IMG_UINT32 *pui32Stat;
+ IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+ IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+ IMG_UINT32 ui32Index;
+
+ if (bPrePower)
+ {
+ HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
+ }
+ else
+ {
+ HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
+ }
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ DEVICE);
+ pui32Stat = &psPowerStats->aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ SYSTEM);
+ pui32Stat = &psPowerStats->aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+static void _InsertPowerTimeStatisticExtraPre(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64StartTimer,
+ IMG_UINT64 ui64Stoptimer)
+{
+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats;
+
+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration =
+ ui64Stoptimer - ui64StartTimer;
+
+ psPowerStats->ui64PreClockSpeedChangeMark = OSClockus();
+}
+
+static void _InsertPowerTimeStatisticExtraPost(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64StartTimer,
+ IMG_UINT64 ui64StopTimer)
+{
+ PVRSRV_POWER_STATS *psPowerStats = &psPowerDevice->sPowerStats;
+ IMG_UINT64 ui64Duration = ui64StartTimer - psPowerStats->ui64PreClockSpeedChangeMark;
+
+ PVR_ASSERT(psPowerStats->ui64PreClockSpeedChangeMark > 0);
+
+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+ psPowerStats->asClockSpeedChanges[psPowerStats->ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+ psPowerStats->ui32ClockSpeedIndexEnd = (psPowerStats->ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+ if (psPowerStats->ui32ClockSpeedIndexEnd == psPowerStats->ui32ClockSpeedIndexStart)
+ {
+ psPowerStats->ui32ClockSpeedIndexStart = (psPowerStats->ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+ }
+
+ psPowerStats->ui64PreClockSpeedChangeMark = 0;
+}
+
+static INLINE void _PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+ OSDI_IMPL_ENTRY *psEntry,
+ PVRSRV_POWER_STAT_TYPE eForced,
+ PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+ IMG_UINT32 ui32Index;
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+ DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+ DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+ DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+ DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+int PVRSRVPowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev;
+ PVRSRV_POWER_STATS *psPowerStats;
+ IMG_UINT32 *pui32Stats;
+ IMG_UINT32 ui32Idx;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (psPowerDevice == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when "
+ "reading power timing stats!"));
+ return -EIO;
+ }
+
+ psPowerStats = &psPowerDevice->sPowerStats;
+
+ pui32Stats = &psPowerStats->aui32PowerTimingStats[0];
+
+ DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n");
+ _PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON);
+ DIPrintf(psEntry, "\n");
+
+ DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n");
+ _PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF);
+ DIPrintf(psEntry, "\n");
+
+ DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n");
+ _PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON);
+ DIPrintf(psEntry, "\n");
+
+ DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n");
+ _PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF);
+ DIPrintf(psEntry, "\n");
+
+
+ DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", psPowerStats->ui32FirmwareStartTimestamp);
+ DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(psPowerStats->ui64FirmwareIdleDuration));
+ DIPrintf(psEntry, "\n");
+
+ DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+ DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+ for (ui32Idx = psPowerStats->ui32ClockSpeedIndexStart;
+ ui32Idx != psPowerStats->ui32ClockSpeedIndexEnd;
+ ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+ {
+ DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",
+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+ psPowerStats->asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+ }
+
+ return 0;
+}
+
+#else /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+static void _InsertPowerTimeStatistic(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+ PVR_UNREFERENCED_PARAMETER(psPowerDevice);
+ PVR_UNREFERENCED_PARAMETER(ui64SysStartTime);
+ PVR_UNREFERENCED_PARAMETER(ui64SysEndTime);
+ PVR_UNREFERENCED_PARAMETER(ui64DevStartTime);
+ PVR_UNREFERENCED_PARAMETER(ui64DevEndTime);
+ PVR_UNREFERENCED_PARAMETER(bForced);
+ PVR_UNREFERENCED_PARAMETER(bPowerOn);
+ PVR_UNREFERENCED_PARAMETER(bPrePower);
+}
+
+static void _InsertPowerTimeStatisticExtraPre(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64StartTimer,
+ IMG_UINT64 ui64Stoptimer)
+{
+ PVR_UNREFERENCED_PARAMETER(psPowerDevice);
+ PVR_UNREFERENCED_PARAMETER(ui64StartTimer);
+ PVR_UNREFERENCED_PARAMETER(ui64Stoptimer);
+}
+
+static void _InsertPowerTimeStatisticExtraPost(PVRSRV_POWER_DEV *psPowerDevice,
+ IMG_UINT64 ui64StartTimer,
+ IMG_UINT64 ui64StopTimer)
+{
+ PVR_UNREFERENCED_PARAMETER(psPowerDevice);
+ PVR_UNREFERENCED_PARAMETER(ui64StartTimer);
+ PVR_UNREFERENCED_PARAMETER(ui64StopTimer);
+}
+#endif
+
+const char *PVRSRVSysPowerStateToString(PVRSRV_SYS_POWER_STATE eState)
+{
+ switch (eState) {
+#define X(name, _) \
+ case PVRSRV_SYS_POWER_STATE_##name: \
+ return #name;
+ _PVRSRV_SYS_POWER_STATES
+#undef X
+ default:
+ return "unknown";
+ }
+}
+
+const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState)
+{
+ switch (eState) {
+ case PVRSRV_DEV_POWER_STATE_DEFAULT:
+ return "DEFAULT";
+ case PVRSRV_DEV_POWER_STATE_OFF:
+ return "OFF";
+ case PVRSRV_DEV_POWER_STATE_ON:
+ return "ON";
+ default:
+ return "unknown";
+ }
+}
+
/*!
Typedef for a pointer to a function that will be called for re-acquiring
device powerlock after releasing it temporarily for some timeout period
return PVRSRV_OK;
}
+PVRSRV_ERROR PVRSRVSetDeviceCurrentPowerState(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState);
+
+ return PVRSRV_OK;
+}
+
/*
@Input pfnPowerLockAcquire : Function to re-acquire power-lock in-case
it was necessary to release it.
PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
PVRSRV_ERROR eError;
+	/* If pfnIsDefaultStateOff is not provided, or pfnIsDefaultStateOff(psPowerDev)
+	 * returns true (meaning the default state is OFF), then force idle. */
if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) &&
- (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev)))
+ (pfnIsDefaultStateOff == NULL || pfnIsDefaultStateOff(psPowerDev)))
{
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
{
eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
eNewPowerState,
eCurrentPowerState,
- BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED));
+ ePwrFlags);
ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
- PVR_RETURN_IF_ERROR(eError);
+ PVR_GOTO_IF_ERROR(eError, ErrRestorePowerState);
}
- InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ _InsertPowerTimeStatistic(psPowerDevice, ui64SysTimer1, ui64SysTimer2,
ui64DevTimer1, ui64DevTimer2,
BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED),
eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
IMG_TRUE);
return PVRSRV_OK;
+
+ErrRestorePowerState:
+	/* In a situation where pfnDevicePrePower() succeeded but pfnSystemPrePower()
+	 * failed, we need to restore the device's power state from before the current
+	 * request. Otherwise there would be an inconsistency between the device's
+	 * actual state and what the driver believes the state is. */
+ {
+ PVRSRV_ERROR eError2 = PVRSRV_OK;
+
+ if (psPowerDevice->pfnDevicePrePower != NULL)
+ {
+ /* Call the device's power callback. */
+ eError2 = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+ eCurrentPowerState,
+ eNewPowerState,
+ ePwrFlags);
+ PVR_LOG_IF_ERROR(eError2, "pfnDevicePrePower");
+ }
+ if (eError2 == PVRSRV_OK && psPowerDevice->pfnDevicePostPower != NULL)
+ {
+ /* Call the device's power callback. */
+ eError2 = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+ eCurrentPowerState,
+ eNewPowerState,
+ ePwrFlags);
+ PVR_LOG_IF_ERROR(eError2, "pfnDevicePostPower");
+ }
+ }
+
+ return eError;
}
/*!
eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
eNewPowerState,
eCurrentPowerState,
- BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED));
+ ePwrFlags);
ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
PVR_RETURN_IF_ERROR(eError);
}
- InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ _InsertPowerTimeStatistic(psPowerDevice, ui64SysTimer1, ui64SysTimer2,
ui64DevTimer1, ui64DevTimer2,
BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED),
eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
IMG_FALSE);
- OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState);
+ PVRSRVSetDeviceCurrentPowerState(psPowerDevice, eNewPowerState);
return PVRSRV_OK;
}
eNewPowerState = psPowerDevice->eDefaultPowerState;
}
- if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState)
+ /* Call power function if the state change or if this is an OS request. */
+ if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState ||
+ BITMASK_ANY(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND_REQ | PVRSRV_POWER_FLAGS_RESUME_REQ))
{
eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
eNewPowerState,
"%s: Transition to %d was denied, Flags=0x%08x",
__func__, eNewPowerState, ePwrFlags));
}
- else if (eError != PVRSRV_OK)
+ else
{
PVR_DPF((PVR_DBG_WARNING,
"%s: Transition to %d FAILED (%s)",
PVRSRV_ERROR eError;
IMG_UINT uiStage = 0;
- PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
- _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState = _IsSystemStatePowered(eNewSysPowerState)
+ ? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
/* If setting devices to default state, force idle all devices whose default state is off */
PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
(eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
- /* require a proper power state */
+ /* Require a proper power state */
if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
{
return PVRSRV_ERROR_INVALID_PARAMS;
/* Prevent simultaneous SetPowerStateKM calls */
_PVRSRVForcedPowerLock(psDeviceNode);
- /* no power transition requested, so do nothing */
+ /* No power transition requested, so do nothing */
if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
{
PVRSRVPowerUnlock(psDeviceNode);
return PVRSRV_OK;
}
- eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff,
- IMG_TRUE, _PVRSRVForcedPowerLock);
- if (eError != PVRSRV_OK)
+ /* If the device is already off don't send the idle request. */
+ if (psDeviceNode->eCurrentSysPowerState != PVRSRV_SYS_POWER_STATE_OFF)
{
- PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM");
- uiStage++;
- goto ErrorExit;
+ eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff,
+ IMG_TRUE, _PVRSRVForcedPowerLock);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM");
+ uiStage = 1;
+ goto ErrorExit;
+ }
}
eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
ePwrFlags | PVRSRV_POWER_FLAGS_FORCED);
if (eError != PVRSRV_OK)
{
- uiStage++;
+ uiStage = 2;
goto ErrorExit;
}
ErrorExit:
PVRSRVPowerUnlock(psDeviceNode);
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.",
- __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
- PVRSRVGetErrorString(eError), uiStage));
+ PVR_DPF((PVR_DBG_ERROR, "%s: Transition from %s to %s FAILED (%s) at stage "
+ "%u. Dumping debug info.", __func__,
+ PVRSRVSysPowerStateToString(psDeviceNode->eCurrentSysPowerState),
+ PVRSRVSysPowerStateToString(eNewSysPowerState),
+ PVRSRVGetErrorString(eError), uiStage));
PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange;
psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
psPowerDevice->hDevCookie = hDevCookie;
- OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eCurrentPowerState);
+ PVRSRVSetDeviceCurrentPowerState(psPowerDevice, eCurrentPowerState);
psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ OSCachedMemSet(&psPowerDevice->sPowerStats, 0, sizeof(psPowerDevice->sPowerStats));
+#endif
+
psDeviceNode->psPowerDev = psPowerDevice;
return PVRSRV_OK;
IMG_BOOL bIdleDevice,
void* pvInfo)
{
- PVRSRV_ERROR eError = PVRSRV_OK;
- PVRSRV_POWER_DEV *psPowerDevice;
- IMG_UINT64 ui64StartTimer, ui64StopTimer;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
PVR_UNREFERENCED_PARAMETER(pvInfo);
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
/* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
eError = PVRSRVPowerLock(psDeviceNode);
PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
- psPowerDevice = psDeviceNode->psPowerDev;
- if (psPowerDevice)
+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+
+ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
{
- PVRSRV_DEV_POWER_STATE eCurrentPowerState =
- OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+ /* We can change the clock speed if the device is either IDLE or OFF */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
- if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ if (eError != PVRSRV_OK)
{
- /* We can change the clock speed if the device is either IDLE or OFF */
- eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
-
- if (eError != PVRSRV_OK)
+ /* FW can signal denied when busy with SPM or other work it cannot idle */
+ if (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
{
- /* FW Can signal denied when busy with SPM or other work it can not idle */
- if (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) from %s()", __func__,
- PVRSRVGETERRORSTRING(eError), "PVRSRVDeviceIdleRequestKM"));
- }
- if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
- {
- PVRSRVPowerUnlock(psDeviceNode);
- }
- return eError;
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) from %s()", __func__,
+ PVRSRVGETERRORSTRING(eError), "PVRSRVDeviceIdleRequestKM"));
}
+ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ }
+ return eError;
}
-
- eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
- eCurrentPowerState);
}
+ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+ eCurrentPowerState);
+
ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
- InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+ _InsertPowerTimeStatisticExtraPre(psPowerDevice, ui64StartTimer, ui64StopTimer);
return eError;
}
IMG_BOOL bIdleDevice,
void* pvInfo)
{
- PVRSRV_ERROR eError;
- PVRSRV_POWER_DEV *psPowerDevice;
- IMG_UINT64 ui64StartTimer, ui64StopTimer;
+ PVRSRV_ERROR eError;
+ PVRSRV_POWER_DEV *psPowerDevice = psDeviceNode->psPowerDev;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
PVR_UNREFERENCED_PARAMETER(pvInfo);
+ if (psPowerDevice == NULL)
+ {
+ return;
+ }
+
ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
- psPowerDevice = psDeviceNode->psPowerDev;
- if (psPowerDevice)
- {
- PVRSRV_DEV_POWER_STATE eCurrentPowerState =
- OSAtomicRead(&psPowerDevice->eCurrentPowerState);
+ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState);
- eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
- eCurrentPowerState);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
- __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
- }
+ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+ eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
+ }
- if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
- {
- eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
- PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM");
- }
+ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ {
+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM");
}
/* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
- InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+ _InsertPowerTimeStatisticExtraPost(psPowerDevice, ui64StartTimer, ui64StopTimer);
}
PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode,
#include "lock.h"
#include "allocmem.h"
#include "osfunc.h"
-#include "lists.h"
#include "process_stats.h"
#include "ri_server.h"
#include "hash.h"
#include "connection_server.h"
#include "pvrsrv.h"
#include "proc_stats.h"
-#include "htbuffer.h"
#include "pvr_ricommon.h"
#include "di_server.h"
+#include "dllist.h"
#if defined(__linux__)
#include "trace_events.h"
#endif
#if defined(PVRSRV_ENABLE_PERPID_STATS)
/* Array of Process stat type defined using the X-Macro */
#define X(stat_type, stat_str) stat_str,
-const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+static const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+static const IMG_CHAR *const pszDeviceStatType[PVRSRV_DEVICE_STAT_TYPE_COUNT] = { PVRSRV_DEVICE_STAT_KEY };
#undef X
#endif
/* Array of Driver stat type defined using the X-Macro */
#define X(stat_type, stat_str) stat_str,
-const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
+static const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
#undef X
/* structure used in hash table to track statistic entries */
#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
#endif
-int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
/* Note: all of the accesses to the global stats should be protected
* invocations of macros *_GLOBAL_STAT_VALUE. */
/* Macros for fetching stat values */
-#define GET_STAT_VALUE(ptr,var) (ptr)->i32StatValue[(var)]
+#define GET_STAT_VALUE(ptr,var) (ptr)->i64StatValue[(var)]
#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui64StatValue[idx]
#define GET_GPUMEM_GLOBAL_STAT_VALUE() \
* Macros for updating stat values.
*/
#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while (0)
-#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while (0)
-#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] += (val); if ((var).ui64StatValue[(idx)] > (var).ui64StatValue[(idx##_MAX)]) {(var).ui64StatValue[(idx##_MAX)] = (var).ui64StatValue[(idx)];} } while (0)
+#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i64StatValue[(var)] += (IMG_INT64)(val); if ((ptr)->i64StatValue[(var)] > (ptr)->i64StatValue[(var##_MAX)]) {(ptr)->i64StatValue[(var##_MAX)] = (ptr)->i64StatValue[(var)];} } while (0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] += (IMG_UINT64)(val); if ((var).ui64StatValue[(idx)] > (var).ui64StatValue[(idx##_MAX)]) {(var).ui64StatValue[(idx##_MAX)] = (var).ui64StatValue[(idx)];} } while (0)
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
/* Allow stats to go negative */
-#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while (0)
+#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i64StatValue[(var)] -= (val); } while (0)
#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] -= (val); } while (0)
#else
-#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while (0)
-#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui64StatValue[(idx)] >= (val)) { (var).ui64StatValue[(idx)] -= (val); } else { (var).ui64StatValue[(idx)] = 0; } } while (0)
+#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i64StatValue[(var)] >= (val)) { (ptr)->i64StatValue[(var)] -= (IMG_INT64)(val); } else { (ptr)->i64StatValue[(var)] = 0; } } while (0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui64StatValue[(idx)] >= (val)) { (var).ui64StatValue[(idx)] -= (IMG_UINT64)(val); } else { (var).ui64StatValue[(idx)] = 0; } } while (0)
#endif
#define MAX_CACHEOP_STAT 16
#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1))
#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1))
+/*
+ * Track the search of one process when PVRSRV_DEBUG_LINUX_MEMORY_STATS
+ * is enabled.
+ */
+typedef enum _PVRSRV_PROC_SEARCH_STATE_
+{
+ PVRSRV_PROC_NOTFOUND,
+ PVRSRV_PROC_FOUND,
+ PVRSRV_PROC_RESURRECTED,
+} PVRSRV_PROC_SEARCH_STATE;
+
/*
* Structures for holding statistics...
*/
typedef struct _PVRSRV_MEM_ALLOC_REC_
{
PVRSRV_MEM_ALLOC_TYPE eAllocType;
- IMG_UINT64 ui64Key;
void* pvCpuVAddr;
IMG_CPU_PHYADDR sCpuPAddr;
size_t uiBytes;
- void* pvPrivateData;
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
void* pvAllocdFromFile;
IMG_UINT32 ui32AllocdFromLine;
#endif
- IMG_PID pid;
- struct _PVRSRV_MEM_ALLOC_REC_* psNext;
- struct _PVRSRV_MEM_ALLOC_REC_** ppsThis;
} PVRSRV_MEM_ALLOC_REC;
+
+typedef struct PVRSRV_MEM_ALLOC_PRINT_DATA_TAG
+{
+ OSDI_IMPL_ENTRY *psEntry;
+ IMG_PID pid;
+ IMG_UINT32 ui32NumEntries;
+} PVRSRV_MEM_ALLOC_PRINT_DATA;
#endif
typedef struct _PVRSRV_PROCESS_STATS_ {
/* Linked list pointers */
- struct _PVRSRV_PROCESS_STATS_* psNext;
- struct _PVRSRV_PROCESS_STATS_* psPrev;
+ DLLIST_NODE sNode;
/* Create per process lock that need to be held
* to edit of its members */
IMG_PID pid;
IMG_UINT32 ui32RefCount;
- /* Stats... */
- IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ /* Process memory stats */
+ IMG_INT64 i64StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
IMG_UINT32 ui32StatAllocFlags;
#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
IMG_DEV_VIRTADDR sDevVAddr;
IMG_DEV_PHYADDR sDevPAddr;
- RGXFWIF_DM eFenceOpType;
#endif
IMG_DEVMEM_SIZE_T uiOffset;
IMG_DEVMEM_SIZE_T uiSize;
/* Other statistics structures */
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
- PVRSRV_MEM_ALLOC_REC* psMemoryRecords;
+ HASH_TABLE* psMemoryRecords;
#endif
+ /* Device stats */
+ IMG_UINT32 ui32DevCount;
+ IMG_INT32 ai32DevStats[][PVRSRV_DEVICE_STAT_TYPE_COUNT];
} PVRSRV_PROCESS_STATS;
#if defined(ENABLE_DEBUGFS_PIDS)
#endif
-#if defined(PVRSRV_ENABLE_MEMORY_STATS)
-static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
-static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
-#endif
-
/*
* Global Boolean to flag when the statistics are ready to monitor
* memory allocations.
* Linked lists for process stats. Live stats are for processes which are still running
* and the dead list holds those that have exited.
*/
-static PVRSRV_PROCESS_STATS *g_psLiveList;
-static PVRSRV_PROCESS_STATS *g_psDeadList;
+static DLLIST_NODE gsLiveList;
+static DLLIST_NODE gsDeadList;
static POS_LOCK g_psLinkedListLock;
/* Lockdep feature in the kernel cannot differentiate between different instances of same lock type.
* a false warning message about the possible occurrence of deadlock due to recursive lock acquisition.
* Hence we create the following sub classes to explicitly appraise Lockdep of such safe lock nesting */
#define PROCESS_LOCK_SUBCLASS_CURRENT 1
-#define PROCESS_LOCK_SUBCLASS_PREV 2
-#define PROCESS_LOCK_SUBCLASS_NEXT 3
#if defined(ENABLE_DEBUGFS_PIDS)
/*
* Pointer to OS folder to hold PID folders.
static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);
-static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
-static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
-static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
-
static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
PVRSRV_PROCESS_STATS* psProcessStats,
- IMG_UINT32 uiBytes);
-/*
- * Power statistics related definitions
- */
-
-/* For the mean time, use an exponentially weighted moving average with a
- * 1/4 weighting for the new measurement.
- */
-#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) )
-
-#define UPDATE_TIME(time, newtime) \
- ((time) > 0 ? MEAN_TIME((time), (newtime)) : (newtime))
-
-/* Enum to be used as input to GET_POWER_STAT_INDEX */
-typedef enum
-{
- DEVICE = 0,
- SYSTEM = 1,
- POST_POWER = 0,
- PRE_POWER = 2,
- POWER_OFF = 0,
- POWER_ON = 4,
- NOT_FORCED = 0,
- FORCED = 8,
-} PVRSRV_POWER_STAT_TYPE;
-
-/* Macro used to access one of the power timing statistics inside an array */
-#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
- ((forced) + (powon) + (prepow) + (system))
-
-/* For the power timing stats we need 16 variables to store all the
- * combinations of forced/not forced, power-on/power-off, pre-power/post-power
- * and device/system statistics
- */
-#define NUM_POWER_STATS (16)
-static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
-
-static DI_ENTRY *psPowerStatsDIEntry;
-
-typedef struct _EXTRA_POWER_STATS_
-{
- IMG_UINT64 ui64PreClockSpeedChangeDuration;
- IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration;
- IMG_UINT64 ui64PostClockSpeedChangeDuration;
-} EXTRA_POWER_STATS;
-
-#define NUM_EXTRA_POWER_STATS 10
-
-static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
-static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd;
-
-
-#if defined(PVRSRV_ENABLE_PROCESS_STATS)
-void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
- IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
- IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
-{
- IMG_UINT32 *pui32Stat;
- IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
- IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
- IMG_UINT32 ui32Index;
-
- if (bPrePower)
- {
- HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
- }
- else
- {
- HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
- }
-
- ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
- bPowerOn ? POWER_ON : POWER_OFF,
- bPrePower ? PRE_POWER : POST_POWER,
- DEVICE);
- pui32Stat = &aui32PowerTimingStats[ui32Index];
- *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
-
- ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
- bPowerOn ? POWER_ON : POWER_OFF,
- bPrePower ? PRE_POWER : POST_POWER,
- SYSTEM);
- pui32Stat = &aui32PowerTimingStats[ui32Index];
- *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
-}
-
-static IMG_UINT64 ui64PreClockSpeedChangeMark;
-
-void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer)
-{
- asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64Stoptimer - ui64StartTimer;
-
- ui64PreClockSpeedChangeMark = OSClockus();
-}
-
-void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
-{
- IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
-
- PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
-
- asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
- asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
-
- ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
-
- if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
- {
- ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
- }
-
- ui64PreClockSpeedChangeMark = 0;
-}
-#endif
+ IMG_UINT64 uiBytes);
/*************************************************************************/ /*!
@Function _FindProcessStatsInLiveList
static PVRSRV_PROCESS_STATS*
_FindProcessStatsInLiveList(IMG_PID pid)
{
- PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+ DLLIST_NODE *psNode, *psNext;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
+
if (psProcessStats->pid == pid)
{
return psProcessStats;
}
-
- psProcessStats = psProcessStats->psNext;
}
-
return NULL;
} /* _FindProcessStatsInLiveList */
static PVRSRV_PROCESS_STATS*
_FindProcessStatsInDeadList(IMG_PID pid)
{
- PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+ DLLIST_NODE *psNode, *psNext;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsDeadList, psNode, psNext)
{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
+
if (psProcessStats->pid == pid)
{
return psProcessStats;
}
-
- psProcessStats = psProcessStats->psNext;
}
-
return NULL;
} /* _FindProcessStatsInDeadList */
static void
_CompressMemoryUsage(void)
{
- PVRSRV_PROCESS_STATS* psProcessStats;
PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed;
- IMG_UINT32 ui32ItemsRemaining;
+ IMG_INT32 i32ItemsRemaining;
+ DLLIST_NODE *psNode, *psNext;
+ DLLIST_NODE sToBeFreedHead;
/*
* We hold the lock whilst checking the list, but we'll release it
OSLockAcquire(g_psLinkedListLock);
/* Check that the dead list is not bigger than the max size... */
- psProcessStats = g_psDeadList;
psProcessStatsToBeFreed = NULL;
- ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES;
+ i32ItemsRemaining = MAX_DEAD_LIST_PROCESSES;
+
+ dllist_init(&sToBeFreedHead);
- while (psProcessStats != NULL && ui32ItemsRemaining > 0)
+ dllist_foreach_node(&gsDeadList, psNode, psNext)
{
- ui32ItemsRemaining--;
- if (ui32ItemsRemaining == 0)
+ i32ItemsRemaining--;
+ if (i32ItemsRemaining < 0)
{
/* This is the last allowed process, cut the linked list here! */
- psProcessStatsToBeFreed = psProcessStats->psNext;
- psProcessStats->psNext = NULL;
- }
- else
- {
- psProcessStats = psProcessStats->psNext;
+ dllist_remove_node(psNode);
+ dllist_add_to_tail(&sToBeFreedHead, psNode);
}
}
OSLockRelease(g_psLinkedListLock);
- /* Any processes stats remaining will need to be destroyed... */
- while (psProcessStatsToBeFreed != NULL)
+ dllist_foreach_node(&sToBeFreedHead, psNode, psNext)
{
- PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
-
- psProcessStatsToBeFreed->psNext = NULL;
+ psProcessStatsToBeFreed = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
_DestroyProcessStat(psProcessStatsToBeFreed);
- psProcessStatsToBeFreed = psNextProcessStats;
}
} /* _CompressMemoryUsage */
_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
{
/* Take the element out of the live list and append to the dead list... */
- _RemoveProcessStatsFromList(psProcessStats);
- _AddProcessStatsToFrontOfDeadList(psProcessStats);
+ PVR_ASSERT(psProcessStats != NULL);
+ dllist_remove_node(&psProcessStats->sNode);
+ dllist_add_to_head(&gsDeadList, &psProcessStats->sNode);
} /* _MoveProcessToDeadList */
-#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
/* These functions move the process stats from the dead to the live list.
* _MoveProcessToLiveList moves the entry in the global lists and
* it needs to be protected by g_psLinkedListLock.
_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
{
 /* Take the element out of the dead list and prepend to the live list... */
- _RemoveProcessStatsFromList(psProcessStats);
- _AddProcessStatsToFrontOfLiveList(psProcessStats);
-} /* _MoveProcessToLiveList */
-#endif
-
-/*************************************************************************/ /*!
-@Function _AddProcessStatsToFrontOfLiveList
-@Description Add a statistic to the live list head.
-@Input psProcessStats Process stats to add.
-*/ /**************************************************************************/
-static void
-_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
-{
- /* This function should always be called under global list lock g_psLinkedListLock.
- */
- PVR_ASSERT(psProcessStats != NULL);
-
- OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
-
- if (g_psLiveList != NULL)
- {
- PVR_ASSERT(psProcessStats != g_psLiveList);
- OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
- g_psLiveList->psPrev = psProcessStats;
- OSLockRelease(g_psLiveList->hLock);
- psProcessStats->psNext = g_psLiveList;
- }
-
- g_psLiveList = psProcessStats;
-
- OSLockRelease(psProcessStats->hLock);
-} /* _AddProcessStatsToFrontOfLiveList */
-
-/*************************************************************************/ /*!
-@Function _AddProcessStatsToFrontOfDeadList
-@Description Add a statistic to the dead list head.
-@Input psProcessStats Process stats to add.
-*/ /**************************************************************************/
-static void
-_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
-{
PVR_ASSERT(psProcessStats != NULL);
- OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
-
- if (g_psDeadList != NULL)
- {
- PVR_ASSERT(psProcessStats != g_psDeadList);
- OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
- g_psDeadList->psPrev = psProcessStats;
- OSLockRelease(g_psDeadList->hLock);
- psProcessStats->psNext = g_psDeadList;
- }
-
- g_psDeadList = psProcessStats;
-
- OSLockRelease(psProcessStats->hLock);
-} /* _AddProcessStatsToFrontOfDeadList */
-
-/*************************************************************************/ /*!
-@Function _RemoveProcessStatsFromList
-@Description Detaches a process from either the live or dead list.
-@Input psProcessStats Process stats to remove.
-*/ /**************************************************************************/
-static void
-_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
-{
- PVR_ASSERT(psProcessStats != NULL);
-
- OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
-
- /* Remove the item from the linked lists... */
- if (g_psLiveList == psProcessStats)
- {
- g_psLiveList = psProcessStats->psNext;
-
- if (g_psLiveList != NULL)
- {
- PVR_ASSERT(psProcessStats != g_psLiveList);
- OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
- g_psLiveList->psPrev = NULL;
- OSLockRelease(g_psLiveList->hLock);
-
- }
- }
- else if (g_psDeadList == psProcessStats)
- {
- g_psDeadList = psProcessStats->psNext;
-
- if (g_psDeadList != NULL)
- {
- PVR_ASSERT(psProcessStats != g_psDeadList);
- OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
- g_psDeadList->psPrev = NULL;
- OSLockRelease(g_psDeadList->hLock);
- }
- }
- else
- {
- PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
- PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;
-
- if (psProcessStats->psNext != NULL)
- {
- PVR_ASSERT(psProcessStats != psNext);
- OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
- psProcessStats->psNext->psPrev = psPrev;
- OSLockRelease(psNext->hLock);
- }
- if (psProcessStats->psPrev != NULL)
- {
- PVR_ASSERT(psProcessStats != psPrev);
- OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
- psProcessStats->psPrev->psNext = psNext;
- OSLockRelease(psPrev->hLock);
- }
- }
-
-
- /* Reset the pointers in this cell, as it is not attached to anything */
- psProcessStats->psNext = NULL;
- psProcessStats->psPrev = NULL;
-
- OSLockRelease(psProcessStats->hLock);
-
-} /* _RemoveProcessStatsFromList */
+ dllist_remove_node(&psProcessStats->sNode);
+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode);
+} /* _MoveProcessToLiveList */
static PVRSRV_ERROR
_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid)
{
PVRSRV_ERROR eError;
PVRSRV_PROCESS_STATS *psProcessStats;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_UINT32 ui32DevCount = 0;
- psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psPVRSRVData != NULL)
+ {
+ ui32DevCount = psPVRSRVData->ui32RegisteredDevices;
+ }
+
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS) +
+ ui32DevCount * PVRSRV_DEVICE_STAT_TYPE_COUNT * sizeof(IMG_INT32));
PVR_RETURN_IF_NOMEM(psProcessStats);
psProcessStats->pid = ownerPid;
psProcessStats->ui32RefCount = 1;
-
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+ psProcessStats->ui32DevCount = ui32DevCount;
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryRecords = HASH_Create(HASH_INITIAL_SIZE);
+ PVR_GOTO_IF_NOMEM(psProcessStats->psMemoryRecords, eError, free_process_stats);
+#endif
eError = OSLockCreateNoStats(&psProcessStats->hLock);
- PVR_GOTO_IF_ERROR(eError, e0);
+ PVR_GOTO_IF_ERROR(eError, destroy_mem_recs);
*ppsProcessStats = psProcessStats;
return PVRSRV_OK;
-e0:
+destroy_mem_recs:
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ HASH_Delete(psProcessStats->psMemoryRecords);
+free_process_stats:
+#endif
OSFreeMemNoStats(psProcessStats);
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static PVRSRV_ERROR _FreeMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv)
+{
+ PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v;
+
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
+ PVR_DPF((PVR_DBG_WARNING, "Mem Stats Record not freed: 0x%" IMG_UINT64_FMTSPECx " %p, size="IMG_SIZE_FMTSPEC", %s:%d",
+ (IMG_UINT64)(k), psRecord, psRecord->uiBytes,
+ (IMG_CHAR*)psRecord->pvAllocdFromFile, psRecord->ui32AllocdFromLine));
+#else
+ PVR_UNREFERENCED_PARAMETER(k);
+#endif
+ OSFreeMemNoStats(psRecord);
+
+ return PVRSRV_OK;
+}
+#endif
+
/*************************************************************************/ /*!
@Function _DestroyProcessStat
@Description Frees memory and resources held by a process statistic.
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- /* Free the memory statistics... */
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
- while (psProcessStats->psMemoryRecords)
- {
- List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords);
- }
+ /* Free the memory statistics... */
+ HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_FreeMemStatsEntry, NULL);
+ HASH_Delete(psProcessStats->psMemoryRecords);
#endif
OSLockRelease(psProcessStats->hLock);
{
PVRSRV_ERROR error;
- PVR_ASSERT(g_psLiveList == NULL);
- PVR_ASSERT(g_psDeadList == NULL);
PVR_ASSERT(g_psLinkedListLock == NULL);
PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
/* We need a lock to protect the linked lists... */
+#if defined(__linux__) && defined(__KERNEL__)
+ error = OSLockCreateNoStats(&g_psLinkedListLock);
+#else
error = OSLockCreate(&g_psLinkedListLock);
+#endif
PVR_GOTO_IF_ERROR(error, return_);
/* We also need a lock to protect the hash table used for size tracking. */
+#if defined(__linux__) && defined(__KERNEL__)
+ error = OSLockCreateNoStats(&gpsSizeTrackingHashTableLock);
+#else
error = OSLockCreate(&gpsSizeTrackingHashTableLock);
- PVR_GOTO_IF_ERROR(error, detroy_linked_list_lock_);
+#endif
+ PVR_GOTO_IF_ERROR(error, destroy_linked_list_lock_);
/* We also need a lock to protect the GlobalStat counters */
+#if defined(__linux__) && defined(__KERNEL__)
+ error = OSLockCreateNoStats(&gsGlobalStats.hGlobalStatsLock);
+#else
error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock);
+#endif
PVR_GOTO_IF_ERROR(error, destroy_hashtable_lock_);
/* Flag that we are ready to start monitoring memory allocations. */
gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_);
- OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+ dllist_init(&gsLiveList);
+ dllist_init(&gsDeadList);
bProcessStatsInitialised = IMG_TRUE;
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
}
#endif
- {
- DI_ITERATOR_CB sIterator = {.pfnShow = PowerStatsPrintElements};
- /* Create power stats entry... */
- error = DICreateEntry("power_timing_stats", NULL, &sIterator, NULL,
- DI_ENTRY_TYPE_GENERIC, &psPowerStatsDIEntry);
- PVR_LOG_IF_ERROR(error, "DICreateEntry (2)");
- }
-
{
DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements};
error = DICreateEntry("driver_stats", NULL, &sIterator, NULL,
return PVRSRV_OK;
destroy_stats_lock_:
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(gsGlobalStats.hGlobalStatsLock);
+#else
OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+#endif
gsGlobalStats.hGlobalStatsLock = NULL;
destroy_hashtable_lock_:
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(gpsSizeTrackingHashTableLock);
+#else
OSLockDestroy(gpsSizeTrackingHashTableLock);
+#endif
gpsSizeTrackingHashTableLock = NULL;
-detroy_linked_list_lock_:
+destroy_linked_list_lock_:
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(g_psLinkedListLock);
+#else
OSLockDestroy(g_psLinkedListLock);
+#endif
g_psLinkedListLock = NULL;
return_:
return error;
void
PVRSRVStatsDestroy(void)
{
+ DLLIST_NODE *psNode, *psNext;
+
PVR_ASSERT(bProcessStatsInitialised);
#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
}
#endif
- /* Destroy the power stats entry... */
- if (psPowerStatsDIEntry!=NULL)
- {
- DIDestroyEntry(psPowerStatsDIEntry);
- psPowerStatsDIEntry = NULL;
- }
-
/* Destroy the global data entry */
if (psGlobalMemDIEntry!=NULL)
{
/* Destroy the locks... */
if (g_psLinkedListLock != NULL)
{
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(g_psLinkedListLock);
+#else
OSLockDestroy(g_psLinkedListLock);
+#endif
g_psLinkedListLock = NULL;
}
/* Free the live and dead lists... */
- while (g_psLiveList != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
- PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
- _RemoveProcessStatsFromList(psProcessStats);
+ PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
+ dllist_remove_node(&psProcessStats->sNode);
_DestroyProcessStat(psProcessStats);
}
- while (g_psDeadList != NULL)
+ dllist_foreach_node(&gsDeadList, psNode, psNext)
{
- PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
- _RemoveProcessStatsFromList(psProcessStats);
+ PVRSRV_PROCESS_STATS* psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
+ dllist_remove_node(&psProcessStats->sNode);
_DestroyProcessStat(psProcessStats);
}
}
if (gpsSizeTrackingHashTableLock != NULL)
{
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(gpsSizeTrackingHashTableLock);
+#else
OSLockDestroy(gpsSizeTrackingHashTableLock);
+#endif
gpsSizeTrackingHashTableLock = NULL;
}
if (NULL != gsGlobalStats.hGlobalStatsLock)
{
+#if defined(__linux__) && defined(__KERNEL__)
+ OSLockDestroyNoStats(gsGlobalStats.hGlobalStatsLock);
+#else
OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+#endif
gsGlobalStats.hGlobalStatsLock = NULL;
}
if (psProcessStats != NULL)
{
/* Move it back onto the live list! */
- _RemoveProcessStatsFromList(psProcessStats);
- _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ _MoveProcessToLiveList(psProcessStats);
}
else
{
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
psProcessStats->ui32RefCount++;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
- UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+
OSLockRelease(psProcessStats->hLock);
OSLockRelease(g_psLinkedListLock);
/* Add it to the live list... */
OSLockAcquire(g_psLinkedListLock);
- _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode);
OSLockRelease(g_psLinkedListLock);
/* Done */
{
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
psProcessStats->ui32RefCount--;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
if (psProcessStats->ui32RefCount == 0)
}
} /* PVRSRVStatsDeregisterProcess */
+/* Record a device connection for the calling process' stats entry.
+ * Increments the per-device CONNECTIONS counter and updates its
+ * high-water mark (MAX_CONNECTIONS) under the process-stats lock.
+ * Lookup failures (unknown PID, device index out of range) are only
+ * logged; the function still returns PVRSRV_OK in those cases. */
+PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID;
+ IMG_PID ownerPid = OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* g_psLinkedListLock guards the live list and the stats it contains. */
+ OSLockAcquire(g_psLinkedListLock);
+
+ /* Only the live list is searched: a connecting process is expected to
+  * have a live stats entry already (NOTE(review): dead-list entries are
+  * intentionally not resurrected here — confirm this is the intent). */
+ psProcessStats = _FindProcessStatsInLiveList(ownerPid);
+
+ if (psProcessStats != NULL)
+ {
+ if (ui32DevID < psProcessStats->ui32DevCount)
+ {
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]++;
+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_MAX_CONNECTIONS],
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]);
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device index %d is greater than device count %d for PID %d.",
+ __func__, ui32DevID, psProcessStats->ui32DevCount, ownerPid));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d not found.",
+ __func__, ownerPid));
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Failures above are diagnostic only; stats are best-effort. */
+ return PVRSRV_OK;
+}
+
+/* Record a device disconnection: decrements the per-device CONNECTIONS
+ * counter for the owning process' stats entry.
+ * When invoked from the cleanup thread on behalf of a purged connection
+ * (current PID == cleanupThreadPid and a purge PID is set), the stat is
+ * attributed to the purged connection's PID instead of the caller's.
+ * Lookup failures are logged and otherwise ignored (best-effort stats). */
+void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID;
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_PID currentPid = OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ if (psPVRSRVData)
+ {
+ if ((currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ /* Deferred cleanup path: the process may already be on the dead
+  * list, so _FindProcessStats (both lists) is used here rather
+  * than the live-list-only search below. */
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ }
+
+ if (psProcessStats != NULL)
+ {
+ if (ui32DevID < psProcessStats->ui32DevCount)
+ {
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS]--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device index %d is greater than device count %d for PID %d.",
+ __func__, ui32DevID, psProcessStats->ui32DevCount, currentPid));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Process %d not found.",
+ __func__, currentPid));
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+}
+
void
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
void *pvCpuVAddr,
IMG_CPU_PHYADDR sCpuPAddr,
size_t uiBytes,
- void *pvPrivateData,
IMG_PID currentPid
DEBUG_MEMSTATS_PARAMS)
{
PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_MEM_ALLOC_REC* psRecord = NULL;
PVRSRV_PROCESS_STATS* psProcessStats;
- enum { PVRSRV_PROC_NOTFOUND,
- PVRSRV_PROC_FOUND,
- PVRSRV_PROC_RESURRECTED
- } eProcSearch = PVRSRV_PROC_FOUND;
-
+ __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND;
#if defined(ENABLE_GPU_MEM_TRACEPOINT)
IMG_UINT64 ui64InitialSize;
#endif
psRecord->pvCpuVAddr = pvCpuVAddr;
psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
psRecord->uiBytes = uiBytes;
- psRecord->pvPrivateData = pvPrivateData;
-
- psRecord->pid = currentPid;
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON)
psRecord->pvAllocdFromFile = pvAllocFromFile;
eProcSearch = PVRSRV_PROC_NOTFOUND;
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
- PVR_DPF((PVR_DBG_WARNING,
+ PVR_DPF((PVR_DBG_MESSAGE,
"%s: Process stat increment called for 'unknown' process PID(%d)",
__func__, currentPid));
}
/* Add it to the live list... */
- _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode);
OSLockRelease(g_psLinkedListLock);
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
if (eProcSearch == PVRSRV_PROC_RESURRECTED)
{
- PVR_DPF((PVR_DBG_WARNING,
+ PVR_DPF((PVR_DBG_MESSAGE,
"%s: Process stat incremented on 'dead' process PID(%d)",
__func__, currentPid));
/* Move process from dead list to live list */
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- /* Insert the memory record... */
- if (psRecord != NULL)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
{
- List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord);
+ IMG_UINT64 ui64Key;
+
+ if (eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA ||
+ eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES ||
+ eAllocType == PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES)
+ {
+ ui64Key = psRecord->sCpuPAddr.uiAddr;
+ }
+ else
+ {
+ ui64Key = (IMG_UINT64)psRecord->pvCpuVAddr;
+ }
+
+ /* Insert the memory record... */
+ if (!HASH_Insert(psProcessStats->psMemoryRecords, ui64Key, (uintptr_t)psRecord))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s UNABLE TO CREATE mem stats record for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)",
+ __func__, currentPid, OSGetCurrentProcessName(), uiBytes));
+ }
}
+#endif
#if defined(ENABLE_GPU_MEM_TRACEPOINT)
ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
{
case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
{
- if (psRecord != NULL)
- {
- psRecord->ui64Key = sCpuPAddr.uiAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
{
- if (psRecord != NULL)
- {
- psRecord->ui64Key = sCpuPAddr.uiAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
{
- if (psRecord != NULL)
- {
- psRecord->ui64Key = sCpuPAddr.uiAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
{
- if (psRecord != NULL)
- {
- if (pvCpuVAddr == NULL)
- {
- break;
- }
- psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
- }
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
return;
free_record:
+ _decrease_global_stat(eAllocType, uiBytes);
if (psRecord != NULL)
{
OSFreeMemNoStats(psRecord);
}
if (psProcessStats != NULL)
{
- psRecord = psProcessStats->psMemoryRecords;
- while (psRecord != NULL)
- {
- if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
- {
- bFound = IMG_TRUE;
- break;
- }
-
- psRecord = psRecord->psNext;
- }
+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key);
+ bFound = psRecord != NULL;
}
/* If not found, we need to do a full search in case it was allocated to a different PID... */
if (!bFound)
{
PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats;
+ DLLIST_NODE *psNode, *psNext;
/* Search all live lists first... */
- psProcessStats = g_psLiveList;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
if (psProcessStats != psProcessStatsAlreadyChecked)
{
- psRecord = psProcessStats->psMemoryRecords;
- while (psRecord != NULL)
- {
- if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
- {
- bFound = IMG_TRUE;
- break;
- }
-
- psRecord = psRecord->psNext;
- }
+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key);
+ bFound = psRecord != NULL;
}
if (bFound)
{
break;
}
-
- psProcessStats = psProcessStats->psNext;
}
/* If not found, then search all dead lists next... */
if (!bFound)
{
- psProcessStats = g_psDeadList;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsDeadList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
if (psProcessStats != psProcessStatsAlreadyChecked)
{
- psRecord = psProcessStats->psMemoryRecords;
- while (psRecord != NULL)
- {
- if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
- {
- bFound = IMG_TRUE;
- break;
- }
-
- psRecord = psRecord->psNext;
- }
+ psRecord = (PVRSRV_MEM_ALLOC_REC*)HASH_Remove(psProcessStats->psMemoryRecords, ui64Key);
+ bFound = psRecord != NULL;
}
if (bFound)
{
break;
}
-
- psProcessStats = psProcessStats->psNext;
}
}
}
psProcessStats,
psRecord->uiBytes);
- List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
OSLockRelease(psProcessStats->hLock);
OSLockRelease(g_psLinkedListLock);
IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_PROCESS_STATS* psProcessStats = NULL;
- enum { PVRSRV_PROC_NOTFOUND,
- PVRSRV_PROC_FOUND,
- PVRSRV_PROC_RESURRECTED
- } eProcSearch = PVRSRV_PROC_FOUND;
-
+ __maybe_unused PVRSRV_PROC_SEARCH_STATE eProcSearch = PVRSRV_PROC_FOUND;
#if defined(ENABLE_GPU_MEM_TRACEPOINT)
IMG_UINT64 ui64InitialSize;
#endif
eProcSearch = PVRSRV_PROC_NOTFOUND;
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
- PVR_DPF((PVR_DBG_WARNING,
+ PVR_DPF((PVR_DBG_MESSAGE,
"%s: Process stat increment called for 'unknown' process PID(%d)",
__func__, currentPid));
return;
}
/* Add it to the live list... */
- _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ dllist_add_to_head(&gsLiveList, &psProcessStats->sNode);
}
#else
OSLockRelease(g_psLinkedListLock);
#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
if (eProcSearch == PVRSRV_PROC_RESURRECTED)
{
- PVR_DPF((PVR_DBG_WARNING,
+ PVR_DPF((PVR_DBG_MESSAGE,
"%s: Process stat incremented on 'dead' process PID(%d)",
__func__, currentPid));
{
case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
{
- INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes);
psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
break;
static void
_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
PVRSRV_PROCESS_STATS* psProcessStats,
- IMG_UINT32 uiBytes)
+ IMG_UINT64 uiBytes)
{
#if defined(ENABLE_GPU_MEM_TRACEPOINT)
IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats);
{
case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT:
{
- DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes);
- if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0)
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, uiBytes);
+ if (psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0)
{
psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
}
int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
{
PVRSRV_PROCESS_STATS *psProcessStats;
+ DLLIST_NODE *psNode, *psNext;
DIPrintf(psEntry,
"%s,%s,%s,%s,%s,%s,%s\n",
OSLockAcquire(g_psLinkedListLock);
- psProcessStats = g_psLiveList;
-
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
{
DIPrintf(psEntry,
- "%d,%d,%d,%d,%d,%d,%d\n",
+ "%d,%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd","
+ "%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd","
+ "%"IMG_INT64_FMTSPECd",%"IMG_INT64_FMTSPECd"\n",
psProcessStats->pid,
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES],
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]);
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES],
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]);
}
-
- psProcessStats = psProcessStats->psNext;
}
OSLockRelease(g_psLinkedListLock);
_decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
}
-void
-PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
- IMG_PID pidOwner)
+PVRSRV_ERROR
+PVRSRVStatsUpdateOOMStat(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OOMStatType,
+ IMG_PID pidOwner)
{
- PVRSRV_PROCESS_STAT_TYPE eOOMStatType = (PVRSRV_PROCESS_STAT_TYPE) ui32OOMStatType;
+ PVRSRV_DEVICE_STAT_TYPE eOOMStatType = (PVRSRV_DEVICE_STAT_TYPE) ui32OOMStatType;
IMG_PID pidCurrent = pidOwner;
PVRSRV_PROCESS_STATS* psProcessStats;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
/* Don't do anything if we are not initialised or we are shutting down! */
if (!bProcessStatsInitialised)
{
- return;
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ if (ui32OOMStatType >= PVRSRV_DEVICE_STAT_TYPE_COUNT)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
}
/* Lock while we find the correct process and update the record... */
if (psProcessStats != NULL)
{
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- psProcessStats->i32StatValue[eOOMStatType]++;
+ psProcessStats->ai32DevStats[psDeviceNode->sDevId.ui32InternalID][eOOMStatType]++;
OSLockRelease(psProcessStats->hLock);
}
else
{
- PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStats: Process not found for Pid=%d", pidCurrent));
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateOOMStat: Process not found for Pid=%d", pidCurrent));
}
OSLockRelease(g_psLinkedListLock);
-} /* PVRSRVStatsUpdateOOMStats */
-
-PVRSRV_ERROR
-PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
- IMG_PID pidOwner)
-{
- if (ui32OOMStatType >= PVRSRV_PROCESS_STAT_TYPE_COUNT)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- PVRSRVStatsUpdateOOMStats(ui32OOMStatType, pidOwner);
return PVRSRV_OK;
-}
+} /* PVRSRVStatsUpdateOOMStat */
void
-PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32TotalNumPartialRenders,
IMG_UINT32 ui32TotalNumOutOfMemory,
IMG_UINT32 ui32NumTAStores,
IMG_UINT32 ui32Num3DStores,
psProcessStats = _FindProcessStats(pidCurrent);
if (psProcessStats != NULL)
{
+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID;
+
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores;
OSLockRelease(psProcessStats->hLock);
}
else
} /* PVRSRVStatsUpdateRenderContextStats */
void
-PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+PVRSRVStatsUpdateZSBufferStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32NumReqByApp,
IMG_UINT32 ui32NumReqByFW,
IMG_PID owner)
{
psProcessStats = _FindProcessStats(currentPid);
if (psProcessStats != NULL)
{
+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID;
+
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW;
OSLockRelease(psProcessStats->hLock);
}
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process not found for Pid=%d", __func__, currentPid));
+ }
OSLockRelease(g_psLinkedListLock);
} /* PVRSRVStatsUpdateZSBufferStats */
void
-PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+PVRSRVStatsUpdateFreelistStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32NumGrowReqByApp,
IMG_UINT32 ui32NumGrowReqByFW,
IMG_UINT32 ui32InitFLPages,
IMG_UINT32 ui32NumHighPages,
if (psProcessStats != NULL)
{
+ IMG_UINT32 ui32DevID = psDeviceNode->sDevId.ui32InternalID;
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+ psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW;
- UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT],
- (IMG_INT32) ui32InitFLPages);
-
- UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES],
- (IMG_INT32) ui32NumHighPages);
+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_PAGES_INIT],
+ (IMG_INT32) ui32InitFLPages);
+ UPDATE_MAX_VALUE(psProcessStats->ai32DevStats[ui32DevID][PVRSRV_DEVICE_STAT_TYPE_FREELIST_MAX_PAGES],
+ (IMG_INT32) ui32NumHighPages);
OSLockRelease(psProcessStats->hLock);
}
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process not found for Pid=%d", __func__, currentPid));
+ }
OSLockRelease(g_psLinkedListLock);
} /* PVRSRVStatsUpdateFreelistStats */
{
PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry);
PVRSRV_PROCESS_STATS* psProcessStats;
+ DLLIST_NODE *psNode, *psNext;
PVR_UNREFERENCED_PARAMETER(pvData);
OSLockAcquire(g_psLinkedListLock);
- psProcessStats = g_psLiveList;
-
- if (psProcessStats == NULL)
+ if (dllist_is_empty(&gsLiveList))
{
DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr);
}
else
{
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
psStatType->pfnStatsPrintElements(psEntry, psProcessStats);
- psProcessStats = psProcessStats->psNext;
DIPrintf(psEntry, "%s\n", g_szSeparatorStr);
}
}
{
PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry);
PVRSRV_PROCESS_STATS* psProcessStats;
+ DLLIST_NODE *psNode, *psNext;
PVR_UNREFERENCED_PARAMETER(pvData);
OSLockAcquire(g_psLinkedListLock);
- psProcessStats = g_psDeadList;
-
- if (psProcessStats == NULL)
+ if (dllist_is_empty(&gsDeadList))
{
DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr);
}
else
{
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsDeadList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
psStatType->pfnStatsPrintElements(psEntry, psProcessStats);
- psProcessStats = psProcessStats->psNext;
DIPrintf(psEntry, "%s\n", g_szSeparatorStr);
}
}
DIPrintf(psEntry, "PID %u\n", psProcessStats->pid);
- /* Loop through all the values and print them... */
+ /* Print device stats table PVRSRV_DEVICE_STAT_TYPE */
+ if (psProcessStats->ui32DevCount > 0)
+ {
+ IMG_UINT32 i;
+
+ for (ui32StatNumber = 0;
+ ui32StatNumber < ARRAY_SIZE(pszDeviceStatType);
+ ui32StatNumber++)
+ {
+ if (OSStringNCompare(pszDeviceStatType[ui32StatNumber], "", 1) != 0)
+ {
+ DIPrintf(psEntry, "%-34s",
+ pszDeviceStatType[ui32StatNumber]);
+
+ for (i = 0; i < psProcessStats->ui32DevCount; i++)
+ {
+ if (i == 0)
+ {
+ DIPrintf(psEntry, "%10d",
+ psProcessStats->ai32DevStats[i][ui32StatNumber]);
+ }
+ else
+ {
+ DIPrintf(psEntry, ",%d",
+ psProcessStats->ai32DevStats[i][ui32StatNumber]);
+ }
+ }
+ }
+
+ DIPrintf(psEntry, "\n");
+ }
+ }
+
+ /* Print process memory stats table PVRSRV_PROCESS_STAT_TYPE */
for (ui32StatNumber = 0;
ui32StatNumber < ARRAY_SIZE(pszProcessStatType);
ui32StatNumber++)
if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC &&
ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX)
{
- DIPrintf(psEntry, "%-34s%10d %8dK\n",
- pszProcessStatType[ui32StatNumber],
- psProcessStats->i32StatValue[ui32StatNumber],
- psProcessStats->i32StatValue[ui32StatNumber] >> 10);
+ DIPrintf(psEntry, "%-34s%10"IMG_INT64_FMTSPECd" %8"IMG_INT64_FMTSPECd"K\n",
+ pszProcessStatType[ui32StatNumber],
+ psProcessStats->i64StatValue[ui32StatNumber],
+ psProcessStats->i64StatValue[ui32StatNumber] >> 10);
}
else
{
- DIPrintf(psEntry, "%-34s%10d\n",
- pszProcessStatType[ui32StatNumber],
- psProcessStats->i32StatValue[ui32StatNumber]);
+ DIPrintf(psEntry, "%-34s%10"IMG_INT64_FMTSPECd"\n",
+ pszProcessStatType[ui32StatNumber],
+ psProcessStats->i64StatValue[ui32StatNumber]);
}
}
}
#endif
#if defined(PVRSRV_ENABLE_MEMORY_STATS)
-/*************************************************************************/ /*!
-@Function MemStatsPrintElements
-@Description Prints all elements for the memory statistic record.
-@Input pvStatPtr Pointer to statistics structure.
-@Input pfnOSStatsPrintf Printf function to use for output.
-*/ /**************************************************************************/
-void
-MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
- PVRSRV_PROCESS_STATS *psProcessStats)
+static PVRSRV_ERROR _PrintMemStatsEntry(uintptr_t k, uintptr_t v, void* pvPriv)
{
IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
- PVRSRV_MEM_ALLOC_REC *psRecord;
IMG_UINT32 ui32ItemNumber;
+ PVRSRV_MEM_ALLOC_REC *psRecord = (PVRSRV_MEM_ALLOC_REC *)(uintptr_t)v;
+ PVRSRV_MEM_ALLOC_PRINT_DATA *psPrintData = (PVRSRV_MEM_ALLOC_PRINT_DATA *)pvPriv;
+ OSDI_IMPL_ENTRY *psEntry = psPrintData->psEntry;
- /* Write the header... */
- DIPrintf(psEntry, "PID ");
-
- DIPrintf(psEntry, "Type VAddress");
- for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
- {
- DIPrintf(psEntry, " ");
- }
-
- DIPrintf(psEntry, " PAddress");
- for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
- {
- DIPrintf(psEntry, " ");
- }
-
- DIPrintf(psEntry, " Size(bytes)\n");
-
- psRecord = psProcessStats->psMemoryRecords;
- if (psRecord == NULL)
- {
- DIPrintf(psEntry, "%-5d\n", psProcessStats->pid);
- }
-
- while (psRecord != NULL)
+ if (psRecord != NULL)
{
IMG_BOOL bPrintStat = IMG_TRUE;
- DIPrintf(psEntry, "%-5d ", psProcessStats->pid);
+ DIPrintf(psEntry, "%-5d ", psPrintData->pid);
switch (psRecord->eAllocType)
{
DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes);
#endif
}
- /* Move to next record... */
- psRecord = psRecord->psNext;
+
+ psPrintData->ui32NumEntries++;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function MemStatsPrintElements
+@Description Prints all memory allocation records for one process:
+ a table header followed by one row per record in the
+ process's memory-record hash table.
+@Input psEntry DI entry the table is printed into.
+@Input psProcessStats Process statistics owning the records.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry,
+ PVRSRV_PROCESS_STATS *psProcessStats)
+{
+ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+ IMG_UINT32 ui32ItemNumber;
+ PVRSRV_MEM_ALLOC_PRINT_DATA sPrintData;
+
+ sPrintData.psEntry = psEntry;
+ sPrintData.pid = psProcessStats->pid;
+ sPrintData.ui32NumEntries = 0;
+
+ /* Write the header... */
+ DIPrintf(psEntry, "PID ");
+
+ DIPrintf(psEntry, "Type VAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+ {
+ DIPrintf(psEntry, " ");
+ }
+
+ DIPrintf(psEntry, " PAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+ {
+ DIPrintf(psEntry, " ");
+ }
+
+ DIPrintf(psEntry, " Size(bytes)\n");
+
+ /* One row per record; callback counts entries in sPrintData. */
+ HASH_Iterate(psProcessStats->psMemoryRecords, (HASH_pfnCallback)_PrintMemStatsEntry, &sPrintData);
+
+ /* No records: still emit the PID so the process is visible. */
+ if (sPrintData.ui32NumEntries == 0)
+ {
+ DIPrintf(psEntry, "%-5d\n", psProcessStats->pid);
 }
} /* MemStatsPrintElements */
#endif
#endif
-static IMG_UINT32 ui32FirmwareStartTimestamp;
-static IMG_UINT64 ui64FirmwareIdleDuration;
-
-void SetFirmwareStartTime(IMG_UINT32 ui32Time)
-{
- ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
-}
-
-void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
-{
- ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
-}
-
-static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
- OSDI_IMPL_ENTRY *psEntry,
- PVRSRV_POWER_STAT_TYPE eForced,
- PVRSRV_POWER_STAT_TYPE ePowerOn)
-{
- IMG_UINT32 ui32Index;
-
- ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
- DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]);
-
- ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
- DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]);
-
- ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
- DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]);
-
- ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
- DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]);
-}
-
-int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
-{
- IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
- IMG_UINT32 ui32Idx;
-
- PVR_UNREFERENCED_PARAMETER(pvData);
-
- DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n");
- PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON);
- DIPrintf(psEntry, "\n");
-
- DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n");
- PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF);
- DIPrintf(psEntry, "\n");
-
- DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n");
- PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON);
- DIPrintf(psEntry, "\n");
-
- DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n");
- PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF);
- DIPrintf(psEntry, "\n");
-
-
- DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
- DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
- DIPrintf(psEntry, "\n");
-
- DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
- DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
-
- for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
- {
- DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
- asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
- asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
- }
-
- return 0;
-} /* PowerStatsPrintElements */
-
int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
{
IMG_UINT32 ui32StatNumber;
are stored.
@Output Memory statistics records for the requested pid.
*/ /**************************************************************************/
-PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats)
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid,
+ IMG_UINT32 ui32ArrSize,
+ IMG_BOOL bAllProcessStats,
+ IMG_UINT64 *pui64MemoryStats)
{
IMG_INT i;
PVRSRV_PROCESS_STATS* psProcessStats;
- PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(pui64MemoryStats, "pui64MemoryStats");
if (bAllProcessStats)
{
PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT,
- "MemStats array size is incorrect",
- PVRSRV_ERROR_INVALID_PARAMS);
+ "MemStats array size is incorrect",
+ PVRSRV_ERROR_INVALID_PARAMS);
OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
for (i = 0; i < ui32ArrSize; i++)
{
- pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
+ pui64MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
}
OSLockRelease(gsGlobalStats.hGlobalStatsLock);
}
PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT,
- "MemStats array size is incorrect",
- PVRSRV_ERROR_INVALID_PARAMS);
+ "MemStats array size is incorrect",
+ PVRSRV_ERROR_INVALID_PARAMS);
OSLockAcquire(g_psLinkedListLock);
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
for (i = 0; i < ui32ArrSize; i++)
{
- pui32MemoryStats[i] = psProcessStats->i32StatValue[i];
+ pui64MemoryStats[i] = psProcessStats->i64StatValue[i];
}
OSLockRelease(psProcessStats->hLock);
connected processes. Memstat values provided by this API relate
only to the physical memory allocated by the process and does
not relate to any of the mapped or imported memory.
-@Output pui32TotalMem Total memory usage for all live
+@Output pui64TotalMem Total memory usage for all live
PIDs connected to the driver.
@Output pui32NumberOfLivePids Number of live pids currently
connected to the server.
PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for
ppsPerProcessMemUsageData.
*/ /**************************************************************************/
-PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem,
IMG_UINT32 *pui32NumberOfLivePids,
PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData)
{
- IMG_UINT32 ui32Counter = 0;
IMG_UINT32 ui32NumberOfLivePids = 0;
PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND;
- PVRSRV_PROCESS_STATS* psProcessStats = NULL;
PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL;
+ DLLIST_NODE *psNode, *psNext;
OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
- *pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) +
+ *pui64TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) +
GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) +
GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) +
GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) +
OSLockRelease(gsGlobalStats.hGlobalStatsLock);
OSLockAcquire(g_psLinkedListLock);
- psProcessStats = g_psLiveList;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
- psProcessStats = psProcessStats->psNext;
ui32NumberOfLivePids++;
}
if (psPerProcessMemUsageData)
{
- psProcessStats = g_psLiveList;
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ IMG_UINT32 ui32Counter = 0;
- while (psProcessStats != NULL)
+ dllist_foreach_node(&gsLiveList, psNode, psNext)
{
+ psProcessStats = IMG_CONTAINER_OF(psNode, PVRSRV_PROCESS_STATS, sNode);
OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid;
- psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] +
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC];
+ psPerProcessMemUsageData[ui32Counter].ui64KernelMemUsage =
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] +
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC];
- psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] +
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] +
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] +
- psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES];
+ psPerProcessMemUsageData[ui32Counter].ui64GraphicsMemUsage =
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] +
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] +
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] +
+ psProcessStats->i64StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES];
OSLockRelease(psProcessStats->hLock);
- psProcessStats = psProcessStats->psNext;
ui32Counter++;
}
eError = PVRSRV_OK;
if (!pfnDumpDebugPrintf)
{
/* Only notify OS of an issue if the debug dump has gone there */
- //OSWarnOn(IMG_TRUE);
+ OSWarnOn(IMG_TRUE);
}
}
#include "physmem_lma.h"
#include "physmem_osmem.h"
#include "physmem_hostmem.h"
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+#include "physmem_cpumap_history.h"
+#endif
#include "tlintern.h"
#include "htbserver.h"
#if defined(MULTI_DEVICE_BRINGUP)
#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__))
#else
-#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__))
+#define MULTI_DEVICE_BRINGUP_DPF(msg, ...)
#endif
#if defined(SUPPORT_RGX)
while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
{
IMG_HANDLE hEvent;
+ IMG_UINT64 ui64Timeoutus;
if (psPVRSRVData->bUnload)
{
if (bRetryWorkList && bUseGlobalEO)
{
hEvent = hGlobalEvent;
+ /* If using global event object we are
+ * waiting for GPU work to finish, so
+ * use MAX_HW_TIME_US as timeout (this
+ * will be set appropriately when
+ * running on systems with emulated
+ * hardware, etc).
+ */
+ ui64Timeoutus = MAX_HW_TIME_US;
}
else
{
hEvent = hOSEvent;
+ /* Use the default retry timeout. */
+ ui64Timeoutus = CLEANUP_THREAD_WAIT_RETRY_TIMEOUT;
}
eRc = OSEventObjectWaitKernel(hEvent,
bRetryWorkList ?
- CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+ ui64Timeoutus :
CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
if (eRc == PVRSRV_ERROR_TIMEOUT)
{
return gpsPVRSRVData->cleanupThreadTid;
}
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+/*
+ * Firmware is unresponsive.
+ * The Host will initiate a recovery process during which the
+ * Firmware and GPU are reset and returned to a working state.
+ *
+ * Builds a FW-visible buffer listing all active client contexts so the
+ * firmware can rekick them after reset, powers the device down without
+ * waiting for a FW response (FW is in a BAD state), flags recovery mode,
+ * then powers the device back on.
+ *
+ * Returns PVRSRV_OK on success or the first error encountered.
+ */
+static PVRSRV_ERROR HandleFwHostSideRecovery(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32CtxIdx = 0U;
+	IMG_UINT32 ui32Nodes = 0U;
+
+	OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+	/* Get the number of nodes in a linked list */
+	dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+	{
+		++ui32Nodes;
+	}
+
+	/* Any client contexts active at the moment? */
+	if (ui32Nodes > 0U)
+	{
+		/* Free the active context buffer previously allocated */
+		if (psDevInfo->psRGXFWIfActiveContextBufDesc)
+		{
+			DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc);
+			DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc);
+			psDevInfo->psRGXFWIfActiveContextBufDesc = NULL;
+		}
+
+		/* Setup allocations to store the active contexts (+1 for the
+		 * NULL terminator entry appended below). */
+		eError = RGXSetupFwAllocation(psDevInfo,
+		                              RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
+		                              (ui32Nodes + 1) * sizeof(RGXFWIF_ACTIVE_CONTEXT_BUF_DATA),
+		                              "FwSysActiveContextBufData",
+		                              &psDevInfo->psRGXFWIfActiveContextBufDesc,
+		                              (void *) &psDevInfo->psRGXFWIfSysInit->sActiveContextBufBase.ui32Addr,
+		                              (void **) &psDevInfo->psRGXFWIfActiveContextBuf,
+		                              RFW_FWADDR_NOREF_FLAG);
+		if (eError != PVRSRV_OK)
+		{
+			/* Release the list lock before bailing out: jumping to
+			 * Error with hCommonCtxtListLock still held would leak
+			 * the read lock. */
+			PVR_LOG_ERROR(eError, "RGXSetupFwAllocation");
+			OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+			goto Error;
+		}
+
+		/* List of contexts to be rekicked by FW powering up the device */
+		dllist_foreach_node_backwards(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+		{
+			psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext =
+				RGXGetFWCommonContextAddrFromServerCommonCtx(psDevInfo, psNode);
+			++ui32CtxIdx;
+		}
+		/* Null context as the terminator marker */
+		psDevInfo->psRGXFWIfActiveContextBuf[ui32CtxIdx].psContext.ui32Addr = 0;
+	}
+	OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+	/* Host can't expect a response on power-down request as FW is in BAD state */
+	eError = PVRSRVSetDeviceCurrentPowerState(psDeviceNode->psPowerDev, PVRSRV_DEV_POWER_STATE_OFF);
+	PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVSetDeviceCurrentPowerState OFF", Error);
+
+	/* Flag to be set to notify FW while recovering from crash */
+	psDevInfo->psRGXFWIfSysInit->bFwHostRecoveryMode = IMG_TRUE;
+
+	/* Power-on the device resetting GPU & FW */
+	OSLockAcquire(psDeviceNode->hPowerLock);
+	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+	                                     PVRSRV_DEV_POWER_STATE_ON,
+	                                     PVRSRV_POWER_FLAGS_NONE);
+	/* Release on all paths: jumping to Error while holding hPowerLock
+	 * (as the original goto-on-error did) would leak the power lock. */
+	OSLockRelease(psDeviceNode->hPowerLock);
+	PVR_LOG_IF_ERROR(eError, "PVRSRVSetDevicePowerStateKM ON");
+
+Error:
+	return eError;
+}
+#endif
+
static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
va_list va)
{
IMG_BOOL bCheckAfterTimePassed;
pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
- bCheckAfterTimePassed = va_arg(va, IMG_BOOL);
+ /* IMG_BOOL (i.e. bool) is converted to int during default argument promotion
+ * in variadic argument list. Thus, fetch an int to get IMG_BOOL */
+ bCheckAfterTimePassed = (IMG_BOOL) va_arg(va, int);
if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
{
"Device status not OK!!!"));
PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
NULL, NULL);
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ HandleFwHostSideRecovery(psDeviceNode);
+#endif
}
}
}
* indefinite sleep. */
bInfiniteSleep = IMG_FALSE;
#if defined(SUPPORT_RGX)
- RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM_USAGE);
+ RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM64_USAGE);
#endif
}
}
*/
psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10;
-#if defined(VIRTUAL_PLATFORM)
- psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1200000U;
-#else
-#if defined(EMULATOR)
- psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 20000U;
-#else
- psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1000U;
-#endif /* EMULATOR */
-#endif
+
+ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] =
+ MAX_HW_TIME_US / 1000U;
return PVRSRV_OK;
}
void *pvAppHintState = NULL;
IMG_UINT32 ui32AppHintDefault;
+ IMG_BOOL bAppHintDefault;
/*
* As this function performs one time driver initialisation, use the
return PVRSRV_ERROR_ALREADY_EXISTS;
}
+ eError = DIInit();
+ PVR_GOTO_IF_ERROR(eError, Error);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ eError = PVRSRVStatsInitialise();
+ PVR_GOTO_IF_ERROR(eError, Error);
+#endif /* PVRSRV_ENABLE_PROCESS_STATS */
+
+#if defined(SUPPORT_DI_BRG_IMPL)
+ eError = PVRDIImplBrgRegister();
+ PVR_GOTO_IF_ERROR(eError, Error);
+#endif
+
+ eError = HTB_CreateDIEntry();
+ PVR_GOTO_IF_ERROR(eError, Error);
+
/*
* Allocate the device-independent data
*/
NULL);
PVR_GOTO_IF_ERROR(eError, Error);
- eError = DIInit();
- PVR_GOTO_IF_ERROR(eError, Error);
-
-#if defined(SUPPORT_DI_BRG_IMPL)
- eError = PVRDIImplBrgRegister();
- PVR_GOTO_IF_ERROR(eError, Error);
-#endif
-
-#ifdef PVRSRV_ENABLE_PROCESS_STATS
- eError = PVRSRVStatsInitialise();
- PVR_GOTO_IF_ERROR(eError, Error);
-#endif /* PVRSRV_ENABLE_PROCESS_STATS */
-
- eError = HTB_CreateDIEntry();
- PVR_GOTO_IF_ERROR(eError, Error);
-
/*
* Initialise the server bridges
*/
eError = ServerBridgeInit();
PVR_GOTO_IF_ERROR(eError, Error);
- eError = PhysHeapInit();
- PVR_GOTO_IF_ERROR(eError, Error);
-
eError = DevmemIntInit();
PVR_GOTO_IF_ERROR(eError, Error);
RIInitKM();
#endif
- ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG;
+ bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG;
OSCreateKMAppHintState(&pvAppHintState);
OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug,
- &ui32AppHintDefault, &bEnablePageFaultDebug);
+ &bAppHintDefault, &bEnablePageFaultDebug);
OSFreeKMAppHintState(pvAppHintState);
if (bEnablePageFaultDebug)
PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error);
}
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ eError = CPUMappingHistoryInit();
+ PVR_GOTO_IF_ERROR(eError, Error);
+#endif
+
eError = PMRInit();
PVR_GOTO_IF_ERROR(eError, Error);
OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority,
&ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
- ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING;
+ bAppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING;
OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking,
- &ui32AppHintDefault, &bEnableFullSyncTracking);
+ &bAppHintDefault, &bEnableFullSyncTracking);
OSFreeKMAppHintState(pvAppHintState);
pvAppHintState = NULL;
RIDeInitKM();
#endif
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ CPUMappingHistoryDeInit();
+#endif
+
if (bEnablePageFaultDebug)
{
+ /* Clear all allocated history tracking data */
DevicememHistoryDeInitKM();
}
ServerBridgeDeInit();
- PhysHeapDeinit();
-
HTB_DestroyDIEntry();
-#ifdef PVRSRV_ENABLE_PROCESS_STATS
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
PVRSRVStatsDestroy();
#endif /* PVRSRV_ENABLE_PROCESS_STATS */
SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
}
-#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */
-
-static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PHYS_HEAP_USAGE_FLAGS ui32Flags)
{
- IMG_UINT32 ui32FlagsAccumulate = 0;
IMG_UINT32 i;
- PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0,
- "Device config must specify at least one phys heap config.",
- PVRSRV_ERROR_PHYSHEAP_CONFIG);
-
for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
{
- PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i];
-
- PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0,
- PVRSRV_ERROR_PHYSHEAP_CONFIG,
- "Phys heap config %d: must specify usage flags.", i);
-
- PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0,
- PVRSRV_ERROR_PHYSHEAP_CONFIG,
- "Phys heap config %d: duplicate usage flags.", i);
-
- ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags;
-
- /* Output message if default heap is LMA and smaller than recommended minimum */
- if ((i == psDevConfig->eDefaultHeap) &&
-#if defined(__KERNEL__)
- ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) ||
- (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) &&
-#else
- (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) &&
-#endif
- (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE))
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX
- " (recommended minimum heap size is 0x%llx)",
- __func__, psHeapConf->uiSize,
- PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE));
- }
- }
-
- if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL)
- {
- PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0) ,
- "Device config must specify GPU local phys heap config.",
- PVRSRV_ERROR_PHYSHEAP_CONFIG);
- }
- else if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL)
- {
- PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_CPU_LOCAL) != 0) ,
- "Device config must specify CPU local phys heap config.",
- PVRSRV_ERROR_PHYSHEAP_CONFIG);
- }
-
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig)
-{
- PVRSRV_ERROR eError;
- PVRSRV_PHYS_HEAP ePhysHeap;
-
- eError = PVRSRVValidatePhysHeapConfig(psDevConfig);
- PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig");
-
- eError = PhysHeapCreateDeviceHeapsFromConfigs(psDeviceNode,
- psDevConfig->pasPhysHeaps,
- psDevConfig->ui32PhysHeapCount);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit);
-
- for (ePhysHeap = PVRSRV_PHYS_HEAP_DEFAULT+1; ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++)
- {
- if (PhysHeapPVRLayerAcquire(ePhysHeap))
- {
- eError = PhysHeapAcquireByDevPhysHeap(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit);
- }
-
- /* Calculate the total number of user accessible physical heaps */
- if (psDeviceNode->apsPhysHeap[ePhysHeap] && PhysHeapUserModeAlloc(ePhysHeap))
+ if (BITMASK_HAS(psDevConfig->pasPhysHeaps[i].ui32UsageFlags, ui32Flags))
{
- psDeviceNode->ui32UserAllocHeapCount++;
+ return &psDevConfig->pasPhysHeaps[i];
}
}
- if (PhysHeapValidateDefaultHeapExists(psDeviceNode))
- {
- PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysHeapCheckUsageFlags", ErrorDeinit);
- }
-
- eError = PhysHeapMMUPxSetup(psDeviceNode);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapMMUPxSetup", ErrorDeinit);
-
- return PVRSRV_OK;
-
-ErrorDeinit:
- PVR_ASSERT(IMG_FALSE);
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
-
- return eError;
+ return NULL;
}
-void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+/*************************************************************************/ /*!
+@Function PVRSRVAcquireInternalID
+@Description Returns the lowest free device ID.
+@Output pui32InternalID The device ID
+@Return PVRSRV_ERROR PVRSRV_OK or an error code
+*/ /**************************************************************************/
+static PVRSRV_ERROR PVRSRVAcquireInternalID(IMG_UINT32 *pui32InternalID)
{
- PVRSRV_PHYS_HEAP ePhysHeapIdx;
- IMG_UINT32 i;
-
-#if defined(SUPPORT_AUTOVZ)
- if (psDeviceNode->psFwMMUReservedPhysHeap)
- {
- PhysHeapDestroy(psDeviceNode->psFwMMUReservedPhysHeap);
- psDeviceNode->psFwMMUReservedPhysHeap = NULL;
- }
-#endif
+ IMG_UINT32 ui32InternalID = 0;
+ IMG_BOOL bFound = IMG_FALSE;
- PhysHeapMMUPxDeInit(psDeviceNode);
-
- /* Release heaps */
- for (ePhysHeapIdx = 0;
- ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
- ePhysHeapIdx++)
+ for (ui32InternalID = 0;
+ ui32InternalID < PVRSRV_MAX_DEVICES;
+ ui32InternalID++)
{
- if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+ if (PVRSRVGetDeviceInstance(ui32InternalID) == NULL)
{
- PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+ bFound = IMG_TRUE;
+ break;
}
}
- if (psDeviceNode->psFWMainPhysHeap)
+ if (bFound)
{
- PhysHeapDestroy(psDeviceNode->psFWMainPhysHeap);
- psDeviceNode->psFWMainPhysHeap = NULL;
- }
-
- if (psDeviceNode->psFWCfgPhysHeap)
- {
- PhysHeapDestroy(psDeviceNode->psFWCfgPhysHeap);
- psDeviceNode->psFWCfgPhysHeap = NULL;
- }
-
- for (i = 0; i < RGX_NUM_OS_SUPPORTED; i++)
- {
- if (psDeviceNode->apsFWPremapPhysHeap[i])
- {
- PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]);
- psDeviceNode->apsFWPremapPhysHeap[i] = NULL;
- }
+ *pui32InternalID = ui32InternalID;
+ return PVRSRV_OK;
}
-
- PhysHeapDestroyDeviceHeaps(psDeviceNode);
-}
-
-PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
- PHYS_HEAP_USAGE_FLAGS ui32Flags)
-{
- IMG_UINT32 i;
-
- for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+ else
{
- if (psDevConfig->pasPhysHeaps[i].ui32UsageFlags == ui32Flags)
- {
- return &psDevConfig->pasPhysHeaps[i];
- }
+ return PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE;
}
-
- return NULL;
}
PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice,
- IMG_INT32 i32OsDeviceID,
+ IMG_INT32 i32KernelDeviceID,
PVRSRV_DEVICE_NODE **ppsDeviceNode)
{
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_DEVICE_NODE *psDeviceNode;
IMG_UINT32 ui32AppHintDefault;
IMG_UINT32 ui32AppHintDriverMode;
+
#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__)
IMG_UINT32 ui32AppHintPhysMemTestPasses;
#endif
void *pvAppHintState = NULL;
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
IMG_HANDLE hProcessStats;
#endif
+ IMG_BOOL bAppHintDefault;
+ IMG_BOOL bEnablePageFaultDebug = IMG_FALSE;
- MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32OsDeviceID);
+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32KernelDeviceID);
/* Read driver mode (i.e. native, host or guest) AppHint early as it is
required by SysDevInit */
&ui32AppHintDefault, &ui32AppHintDriverMode);
psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode);
+
+ /* Determine if we've got EnablePageFaultDebug set or not */
+ bAppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG;
+ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug,
+ &bAppHintDefault, &bEnablePageFaultDebug);
OSFreeKMAppHintState(pvAppHintState);
pvAppHintState = NULL;
psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode));
PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode");
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Allocate process statistics */
eError = PVRSRVStatsRegisterProcess(&hProcessStats);
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode);
#endif
- psDeviceNode->sDevId.i32OsDeviceID = i32OsDeviceID;
- psDeviceNode->sDevId.ui32InternalID = psPVRSRVData->ui32RegisteredDevices;
+ /* Record setting of EnablePageFaultDebug in device-node */
+ psDeviceNode->bEnablePFDebug = bEnablePageFaultDebug;
+ psDeviceNode->sDevId.i32KernelDeviceID = i32KernelDeviceID;
+ eError = PVRSRVAcquireInternalID(&psDeviceNode->sDevId.ui32InternalID);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireInternalID", ErrorDeregisterStats);
eError = SysDevInit(pvOSDevice, &psDevConfig);
PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats);
PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats);
}
- psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_CREATING;
if (psDevConfig->pfnGpuDomainPower)
{
/* Initialise the paravirtualised connection */
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
- /* If a device already exists */
- if (psPVRSRVData->psDeviceNodeList != NULL)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Virtualization is currently supported only on single device systems.",
- __func__));
- eError = PVRSRV_ERROR_NOT_SUPPORTED;
- goto ErrorSysDevDeInit;
- }
-
- PvzConnectionInit(psDevConfig);
+ PvzConnectionInit();
PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit);
}
+ BIT_SET(psDevConfig->psDevNode->ui32VmState, RGXFW_HOST_DRIVER_ID);
+
eError = PVRSRVRegisterDeviceDbgTable(psDeviceNode);
PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit);
eError = PVRSRVPowerLockInit(psDeviceNode);
PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable);
- eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
+ eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig);
PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit);
#if defined(SUPPORT_RGX)
{
PVR_LOG_ERROR(eError, "RGXRegisterDevice");
eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
- goto ErrorPhysMemHeapsDeinit;
+ goto ErrorPhysHeapDeInitDeviceHeaps;
}
#endif
+ /* Inform the device layer PhysHeaps are now initialised so that device
+ * specific heaps can be obtained along with carrying out any Vz setup. */
if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL)
{
eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode);
- PVR_GOTO_IF_ERROR(eError, ErrorPhysMemHeapsDeinit);
+ PVR_GOTO_IF_ERROR(eError, ErrorPhysHeapDeInitDeviceHeaps);
}
+ /* Carry out initialisation of a dedicated FW MMU data, if the FW CPU has
+ * an MMU separate to the GPU MMU e.g. MIPS based FW. */
if (psDeviceNode->pfnFwMMUInit != NULL)
{
eError = psDeviceNode->pfnFwMMUInit(psDeviceNode);
}
#endif
+ eError = DebugCommonInitDevice(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "DebugCommonInitDevice",
+ ErrorDestroyMemoryContextPageFaultNotifyListLock);
+
+ /* Create the devicemem_history hook for the device. We need to
+ * have the debug-info instantiated before calling this.
+ */
+ if (psDeviceNode->bEnablePFDebug)
+ {
+ eError = DevicememHistoryDeviceCreate(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryDeviceCreate", ErrorDebugCommonDeInitDevice);
+ }
+
#if defined(__linux__)
- /* register the AppHint device control before device initialisation
- * so individual AppHints can be configured during the init phase
+ /* Register the device specific AppHints so individual AppHints can be
+ * configured before the FW is initialised. This must be called after
+ * DebugCommonInitDevice() above as it depends on the created gpuXX/apphint
+ * DI Group.
*/
{
int iError = pvr_apphint_device_register(psDeviceNode);
RGXHWPerfInitAppHintCallbacks(psDeviceNode);
#endif
- eError = DebugCommonInitDevice(psDeviceNode);
- PVR_LOG_GOTO_IF_ERROR(eError, "DebugCommonInitDevice",
- ErrorDestroyMemoryContextPageFaultNotifyListLock);
-
/* Finally insert the device into the dev-list and set it as active */
OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock);
List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorRegisterDVFSDeviceFail);
#endif
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Close the process statistics */
PVRSRVStatsDeregisterProcess(hProcessStats);
#endif
OSLockCreateNoStats(&psDeviceNode->hValidationLock);
#endif
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_CREATED;
+
return PVRSRV_OK;
#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE)
List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
psPVRSRVData->ui32RegisteredDevices--;
OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock);
+
+#if defined(__linux__)
+ pvr_apphint_device_unregister(psDeviceNode);
+#endif /* defined(__linux__) */
+
+ /* Remove the devicemem_history hook if we created it */
+ if (psDeviceNode->bEnablePFDebug)
+ {
+ DevicememHistoryDeviceDestroy(psDeviceNode);
+ }
#endif
+ErrorDebugCommonDeInitDevice:
+ DebugCommonDeInitDevice(psDeviceNode);
+
ErrorDestroyMemoryContextPageFaultNotifyListLock:
OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock);
psDeviceNode->hMemoryContextPageFaultNotifyListLock = NULL;
DevDeInitRGX(psDeviceNode);
#endif
ErrorFwMMUDeinit:
-ErrorPhysMemHeapsDeinit:
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+ErrorPhysHeapDeInitDeviceHeaps:
+ PhysHeapDeInitDeviceHeaps(psDeviceNode);
ErrorPowerLockDeInit:
PVRSRVPowerLockDeInit(psDeviceNode);
ErrorUnregisterDbgTable:
ErrorSysDevDeInit:
SysDevDeInit(psDevConfig);
ErrorDeregisterStats:
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Close the process statistics */
PVRSRVStatsDeregisterProcess(hProcessStats);
ErrorFreeDeviceNode:
PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
{
IMG_BOOL bInitSuccesful = IMG_FALSE;
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
IMG_HANDLE hProcessStats;
#endif
PVRSRV_ERROR eError;
- MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32OsDeviceID);
+ PDUMPCOMMENT(psDeviceNode, "Common Device Initialisation");
- if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID);
+
+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_CREATED)
{
PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
return PVRSRV_ERROR_INIT_FAILURE;
}
+ /* Allocate devmem_history backing store for the device if we have
+ * EnablePageFaultDebug set
+ */
+ if (psDeviceNode->bEnablePFDebug)
+ {
+ eError = DevicememHistoryDeviceInit(psDeviceNode);
+ PVR_LOG_RETURN_IF_ERROR(eError, "DevicememHistoryDeviceInit");
+ }
+
#if defined(PDUMP)
#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
{
eError = OSLockCreate(&psDeviceNode->hConnectionsLock);
PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate");
- PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
/* Allocate process statistics */
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
eError = PVRSRVStatsRegisterProcess(&hProcessStats);
PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess");
#endif
+ eError = MMU_InitDevice(psDeviceNode);
+ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_InitDevice");
+
#if defined(SUPPORT_RGX)
eError = RGXInit(psDeviceNode);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit);
NULL);
#endif
-#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Close the process statistics */
PVRSRVStatsDeregisterProcess(hProcessStats);
#endif
}
#endif
- MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32OsDeviceID);
+ /* Remove DI hook for the devicemem_history for this device (if any).
+ * The associated devicemem_history buffers are freed by the final
+ * call to DevicememHistoryDeInitKM() as they are used asynchronously
+ * by other parts of the DDK.
+ */
+ if (psDeviceNode->bEnablePFDebug)
+ {
+ DevicememHistoryDeviceDestroy(psDeviceNode);
+ }
+
+ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32KernelDeviceID);
psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
SyncServerDeinit(psDeviceNode);
+ MMU_DeInitDevice(psDeviceNode);
+
#if defined(SUPPORT_RGX)
DevDeInitRGX(psDeviceNode);
#endif
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+ PhysHeapDeInitDeviceHeaps(psDeviceNode);
PVRSRVPowerLockDeInit(psDeviceNode);
PVRSRVUnregisterDeviceDbgTable(psDeviceNode);
#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
/* functions only used on rogue, but header defining them is common */
-void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+/* Thin pass-through to the system layer: signature extended to take the
+ * system-data handle, which is forwarded verbatim to SysSetAxiProtOSid(). */
+void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState)
{
- SysSetAxiProtOSid(ui32OSid, bState);
+ SysSetAxiProtOSid(hSysData, ui32OSid, bState);
}
-void SetTrustedDeviceAceEnabled(void)
+/* Thin pass-through to the system layer: signature extended to take the
+ * system-data handle, forwarded verbatim to SysSetTrustedDeviceAceEnabled(). */
+void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData)
{
- SysSetTrustedDeviceAceEnabled();
+ SysSetTrustedDeviceAceEnabled(hSysData);
}
#endif
return psDevNode;
}
-PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance)
+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance)
{
PVRSRV_DEVICE_NODE *psDevNode;
for (psDevNode = gpsPVRSRVData->psDeviceNodeList;
psDevNode != NULL; psDevNode = psDevNode->psNext)
{
- if (i32OSInstance == psDevNode->sDevId.i32OsDeviceID)
+ if (i32OSInstance == psDevNode->sDevId.i32KernelDeviceID)
{
+ MULTI_DEVICE_BRINGUP_DPF("%s: Found DevId %d. Retrieving node.", __func__, i32OSInstance);
break;
}
+ else
+ {
+ MULTI_DEVICE_BRINGUP_DPF("%s: Searching for DevId %d: Id %d not matching", __func__, i32OSInstance, psDevNode->sDevId.i32KernelDeviceID);
+ }
}
OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock);
+ if (psDevNode == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: DevId %d not found.", __func__, i32OSInstance));
+ }
return psDevNode;
}
PVRSRV_ERROR InitRGXREGCONFIGBridge(void);
void DeinitRGXREGCONFIGBridge(void);
#endif
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
void DeinitRGXKICKSYNCBridge(void);
+#endif
#endif /* SUPPORT_RGX */
PVRSRV_ERROR InitCACHEBridge(void);
void DeinitCACHEBridge(void);
PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge");
#endif
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
eError = InitRGXKICKSYNCBridge();
PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge");
+#endif
eError = InitRGXTIMERQUERYBridge();
PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge");
DeinitRGXREGCONFIGBridge();
#endif
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
DeinitRGXKICKSYNCBridge();
+#endif
DeinitRGXTIMERQUERYBridge();
-
#endif /* SUPPORT_RGX */
}
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
-#if defined(__linux__)
+#if defined(__linux__) && defined(__KERNEL__)
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
#include <linux/stdarg.h>
#define RI_PROC_BUF_SIZE 16
+#define RI_DEV_ID_BUF_SIZE 4
+
#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\
"Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\
"Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n"
"[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n"
#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40))
-#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c"
-#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10))
+#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c"
+#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+16+PVR_ANNOTATION_MAX_LEN+10+10))
#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT))
/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */
#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}"
#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5)
-#define RI_MEMDESC_ENTRY_UNPINNED_FRMT "{Unpinned}"
-#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT))
-
-#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c"
-#define RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\
- RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE))
+#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d DEV:%%s 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%c"
+#define RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+RI_DEV_ID_BUF_SIZE+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\
+ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE))
#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT))
IMG_BOOL bIsImport;
IMG_BOOL bIsSuballoc;
IMG_PID pid;
+ IMG_UINT32 ui32DevID;
IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE];
IMG_DEV_VIRTADDR sVAddr;
IMG_UINT64 ui64Offset;
IMG_UINT32 valid;
PMR *psPMR;
IMG_PID pid;
+ IMG_UINT32 ui32DevID;
IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE];
IMG_UINT16 ui16SubListCount;
IMG_UINT16 ui16MaxSubListCount;
psRIEntry = (RI_LIST_ENTRY *)hashData;
if (!psRIEntry)
{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR);
+
/*
* If failed to find a matching existing entry, create a new one
*/
else
{
PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR);
- PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR);
/*
* Add new RI Entry
* or the owner PID is PVR_SYS_ALLOC_PID.
* Also record host dev node allocs on the system PID.
*/
- if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+ if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ||
PVRSRV_CHECK_FW_MAIN(uiPMRFlags) ||
ui32Owner == PVR_SYS_ALLOC_PID ||
psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
psRIEntry->psPMR = psPMR;
psRIEntry->ui32Flags = 0;
+ psRIEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID;
/* Create index entry in Hash Table */
HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
{
PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR);
- if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+ if (psDeviceNode->eDevState < PVRSRV_DEVICE_STATE_ACTIVE ||
psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
{
psRISubEntry->pid = psRISubEntry->psRI->pid;
{
psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
}
- }
- if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
- {
- PVR_DPF((PVR_DBG_WARNING,
- "%s: TextBSize too long (%u). Text will be truncated "
- "to %zu characters", __func__,
- ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
- }
+ if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: TextBSize too long (%u). Text will be truncated "
+ "to %zu characters", __func__,
+ ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1));
+ }
- /* copy ai8TextB field data */
- OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
+ /* copy ai8TextB field data */
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB);
- psRISubEntry->ui64Offset = ui64Offset;
- psRISubEntry->ui64Size = ui64Size;
- psRISubEntry->bIsImport = bIsImport;
- psRISubEntry->bIsSuballoc = bIsSuballoc;
- OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
- dllist_init (&(psRISubEntry->sProcListNode));
+ psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID;
+ psRISubEntry->ui64Offset = ui64Offset;
+ psRISubEntry->ui64Size = ui64Size;
+ psRISubEntry->bIsImport = bIsImport;
+ psRISubEntry->bIsSuballoc = bIsSuballoc;
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+ dllist_init (&(psRISubEntry->sProcListNode));
+ }
/*
* Now insert this MEMDESC into the proc list
@Return PVRSRV_ERROR
******************************************************************************/
-PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32TextBSize,
const IMG_CHAR *psz8TextB,
IMG_UINT64 ui64Size,
IMG_UINT64 ui64DevVAddr,
IMG_PID pid;
RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
if (!g_pRIHashTable)
{
g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+ psRISubEntry->ui32DevID = psDeviceNode->sDevId.ui32InternalID;
if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1)
{
IMG_PID uiRIPid = 0;
PMR* psRIPMR = NULL;
IMG_UINT32 ui32RIPMRFlags = 0;
+ IMG_BOOL bHostDevice = psRISubEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID;
+ IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE];
if (psRISubEntry->psRI != NULL)
{
psRISubEntry->ai8ProcName);
}
+ if (!bHostDevice)
+ {
+ OSSNPrintf(szDeviceID,
+ sizeof(szDeviceID),
+ "%-3d",
+ psRISubEntry->ui32DevID);
+ }
+
if (psRISubEntry->bIsImport && psRIPMR)
{
OSSNPrintf((IMG_CHAR *)&szImport,
szEntryFormat,
(bDebugFs ? "" : " "),
psRISubEntry->pid,
+ (bHostDevice ? "- " : szDeviceID),
(psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
pszAnnotationText,
(bDebugFs ? "" : (char *)szProc),
psRIPMR,
(psRISubEntry->bIsImport ? (char *)&szImport : ""),
(!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "",
- (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "",
(bDebugFs ? '\n' : ' '));
}
}
IMG_DEVMEM_SIZE_T uiLogicalSize = 0;
IMG_DEVMEM_SIZE_T uiPhysicalSize = 0;
IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE];
+ IMG_BOOL bHostDevice = psRIEntry->ui32DevID == PVRSRV_HOST_DEVICE_ID;
+ IMG_CHAR szDeviceID[RI_DEV_ID_BUF_SIZE];
PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize);
/* Set pszAnnotationText to that PMR RI entry */
pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR);
+ if (!bHostDevice)
+ {
+ OSSNPrintf(szDeviceID,
+ sizeof(szDeviceID),
+ "%-3d",
+ psRIEntry->ui32DevID);
+ }
+
OSSNPrintf(pszEntryString,
ui16MaxStrLen,
szEntryFormat,
(bDebugFs ? "" : " "),
psRIEntry->pid,
+ (bHostDevice ? "- " : szDeviceID),
(void*)psRIEntry->psPMR,
pszAnnotationText,
uiLogicalSize,
#include "pvr_bridge.h"
#include "connection_server.h"
#include "device.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "pdump_km.h"
#include "pvrsrv.h"
#include "power.h"
+#include "oskm_apphint.h"
+
#if defined(SUPPORT_RGX)
#include "rgxdevice.h"
#include "rgxinit.h"
IMG_UINT32 ui32Index;
IMG_UINT32 ui32Remainder;
+ BridgeGlobalStatsLock();
+
printf("Total Bridge call count = %u\n"
"Total number of bytes copied via copy_from_user = %u\n"
"Total number of bytes copied via copy_to_user = %u\n"
}
+
+ BridgeGlobalStatsUnlock();
}
#endif
void __user *pvSrc,
IMG_UINT32 ui32Size)
{
+ BridgeGlobalStatsLock();
g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+ BridgeGlobalStatsUnlock();
+
return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
}
PVRSRV_ERROR
void *pvSrc,
IMG_UINT32 ui32Size)
{
+ BridgeGlobalStatsLock();
g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+ BridgeGlobalStatsUnlock();
+
return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
}
#else
}
#endif
+/**************************************************************************/ /*!
+@Function DeviceDefaultPhysHeapFreeMemCheck
+
+@Description Check if the required amount of free space is available in the
+ Default PhysHeap for a connection to be made.
+
+@Input psDeviceNode The device the connection is being
+ made on.
+@Input ui32MinMemInMBs The minimum memory required to be
+ available in the Default PhysHeap.
+
+@Return PVRSRV_OK if successful else a PVRSRV_ERROR.
+*/ /***************************************************************************/
+static PVRSRV_ERROR DeviceDefaultPhysHeapFreeMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MinMemInMBs)
+{
+ PHYS_HEAP *psDefaultHeap = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL, "psDeviceNode");
+
+ /* The device's default heap index comes from its device config. */
+ psDefaultHeap = psDeviceNode->apsPhysHeap[psDeviceNode->psDevConfig->eDefaultHeap];
+ if (psDefaultHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to get device's default PhysHeap"));
+ return PVRSRV_ERROR_INVALID_HEAP;
+ }
+
+ /* The free-space check is only performed for LMA heaps; for any other
+ * heap type the check is skipped and eError remains PVRSRV_OK. */
+ if (PhysHeapGetType(psDefaultHeap) == PHYS_HEAP_TYPE_LMA)
+ {
+ IMG_UINT64 ui64FreePhysHeapMem;
+
+ eError = PhysHeapFreeMemCheck(psDefaultHeap,
+ MB2B(ui32MinMemInMBs),
+ &ui64FreePhysHeapMem);
+ /* Log the shortfall; the error itself is propagated via eError below. */
+ if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Default PhysHeap contains less than the "
+ "minimum free space required to acquire a connection. "
+ "Free space: %"IMG_UINT64_FMTSPEC"MB "
+ "Minimum required: %uMB",
+ B2MB(ui64FreePhysHeapMem),
+ ui32MinMemInMBs));
+ }
+ }
+
+ return eError;
+}
+
+/**************************************************************************/ /*!
+@Function CheckConnectionPhysHeapMem
+
+@Description Check if there is enough memory in the PhysHeaps to allow a
+ connection to be made.
+
+@Input psConnection The connection being made.
+
+@Return PVRSRV_OK if successful else a PVRSRV_ERROR.
+*/ /***************************************************************************/
+static PVRSRV_ERROR CheckConnectionPhysHeapMem(CONNECTION_DATA *psConnection)
+{
+ /* AppHint value overrides the build-time default
+ * PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION; a value of 0 disables the
+ * check entirely (see the != 0 guard below). */
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_PHYSHEAPMINMEMONCONNECTION;
+ IMG_UINT32 ui32AppHintPhysHeapMinMemOnConnection = 0;
+ void *pvAppHintState = NULL;
+ PVRSRV_DEVICE_NODE *psDeviceNode = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_INVALID_PARAM(psConnection);
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysHeapMinMemOnConnection,
+ &ui32AppHintDefault, &ui32AppHintPhysHeapMinMemOnConnection);
+ OSFreeKMAppHintState(pvAppHintState);
+
+ psDeviceNode = OSGetDevNode(psConnection);
+
+ if (ui32AppHintPhysHeapMinMemOnConnection != 0)
+ {
+ eError = DeviceDefaultPhysHeapFreeMemCheck(psDeviceNode,
+ ui32AppHintPhysHeapMinMemOnConnection);
+ PVR_LOG_RETURN_IF_ERROR(eError, "DeviceDefaultPhysHeapFreeMemCheck");
+
+ /* Additionally verify the FW PhysHeap minimum, but only where the
+ * device provides the callback and a non-zero minimum is configured. */
+ if (psDeviceNode->pfnCheckForSufficientFWPhysMem != NULL
+ && RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION > 0)
+ {
+ eError = psDeviceNode->pfnCheckForSufficientFWPhysMem(psDeviceNode);
+ PVR_LOG_RETURN_IF_ERROR(eError, "pfnCheckForSufficientFWPhysMem");
+ }
+ }
+
+ return eError;
+}
+
PVRSRV_ERROR
PVRSRVConnectKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDeviceNode,
IMG_UINT64 *ui64PackedBvnc)
{
PVRSRV_ERROR eError = PVRSRV_OK;
- IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
+ IMG_UINT32 ui32ServerBuildOptions;
IMG_UINT32 ui32DDKVersion, ui32DDKBuild;
PVRSRV_DATA *psSRVData = NULL;
IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
static IMG_BOOL bIsFirstConnection=IMG_FALSE;
-
#if defined(SUPPORT_RGX)
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+ /* Check the minimum free PhysHeap memory is available before allowing
+ * the connection to succeed */
+ eError = CheckConnectionPhysHeapMem(psConnection);
+ PVR_RETURN_IF_ERROR(eError);
+
+#if defined(SUPPORT_RGX)
/* Gather BVNC information to output to UM */
*ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
*pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED;
}
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_RGX) && defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK)
+ /* For GPUs with lossy TFBC support, is system using lossy control group 1? */
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT))
+ {
+ if (psDeviceNode->pfnGetTFBCLossyGroup(psDeviceNode) == 1)
+ {
+ *pui32CapabilityFlags |= PVRSRV_TFBC_LOSSY_GROUP_1;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
{
IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
IMG_BOOL bOSidAxiProtReg = IMG_FALSE;
ui32OSidReg,
bOSidAxiProtReg?"TRUE":"FALSE"));
- SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+ SetAxiProtOSid(psDeviceNode->psDevConfig->hSysData, ui32OSidReg, ui32OSidAxiProtTD);
}
}
#endif /* defined(EMULATOR) */
ui32OSid,
ui32OSidReg));
}
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
/* Only enabled if enabled in the UM */
- if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK))
+ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_EN))
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Workload Estimation disabled. Not enabled in UM",
#if defined(SUPPORT_PDVFS)
/* Only enabled if enabled in the UM */
- if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK))
+ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_EN))
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Proactive DVFS disabled. Not enabled in UM",
psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType =
- ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+ ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_EN) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType =
- (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
+ (ui32ClientBuildOptions & OPTIONS_DEBUG_EN) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE;
if (sizeof(void *) == POINTER_SIZE_64BIT)
{
/*
* Validate the build options
*/
- ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
- if (ui32BuildOptions != ui32ClientBuildOptions)
+ ui32ServerBuildOptions = (RGX_BUILD_OPTIONS_KM);
+ if (ui32ServerBuildOptions != ui32ClientBuildOptions)
{
- ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+ IMG_UINT32 ui32ServerBuildOptionsMismatch = ui32ServerBuildOptions ^ ui32ClientBuildOptions;
+ IMG_UINT32 ui32ClientBuildOptionsMismatch = ui32ServerBuildOptionsMismatch;
+
#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
/*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/
- ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+ ui32ServerBuildOptionsMismatch &= KM_OPTIONS_STRICT;
+ ui32ClientBuildOptionsMismatch &= UM_OPTIONS_STRICT;
#endif
- if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ if ( (ui32ClientBuildOptions & ui32ClientBuildOptionsMismatch) != 0)
{
PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
"extra options present in client-side driver: (0x%x). Please check rgx_options.h",
__func__,
- ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ ui32ClientBuildOptions & ui32ClientBuildOptionsMismatch));
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit);
}
- if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ if ( (ui32ServerBuildOptions & ui32ServerBuildOptionsMismatch) != 0)
{
PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
"extra options present in KM driver: (0x%x). Please check rgx_options.h",
__func__,
- ui32BuildOptions & ui32BuildOptionsMismatch ));
+ ui32ServerBuildOptions & ui32ServerBuildOptionsMismatch ));
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit);
}
if (IMG_FALSE == bIsFirstConnection)
__func__,
ui32ClientBuildOptions,
(psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
- ui32BuildOptions,
+ ui32ServerBuildOptions,
(psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug"));
}else{
PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
__func__,
ui32ClientBuildOptions,
- ui32BuildOptions));
+ ui32ServerBuildOptions));
}
if (!psSRVData->sDriverInfo.bIsNoMatch)
IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
- psDeviceNode->sDevId.i32OsDeviceID,
+ psDeviceNode->sDevId.i32KernelDeviceID,
psConnection->pid);
eError = TLStreamCreate(&psConnection->hClientTLStream,
#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
PVR_DPF((PVR_DBG_ERROR,
"%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
- __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName),
+ __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
(void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
#else
PVR_DPF((PVR_DBG_ERROR,
psBridgePackageKM->ui32FunctionID));
PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error);
}
+
#if defined(DEBUG_BRIDGE_KM)
+ BridgeGlobalStatsLock();
+
PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)",
__func__,
ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
__func__,
g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName));
+
g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++;
g_BridgeGlobalStats.ui32IOCTLCount++;
+ BridgeGlobalStatsUnlock();
#endif
if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL)
ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
- /* if there is no lock held then acquire the stats lock to
- * ensure the calculations are done safely
- */
- if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL)
- {
- BridgeGlobalStatsLock();
- }
+ BridgeGlobalStatsLock();
g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff;
g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff;
}
- if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL)
- {
- BridgeGlobalStatsUnlock();
- }
+ BridgeGlobalStatsUnlock();
#endif
unlock_and_return_error:
return err;
}
-PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray)
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT64 *pui64MemStatArray)
{
#if !defined(__QNXNTO__)
return PVRSRVFindProcessMemStats(pid,
ui32ArrSize,
bAllProcessStats,
- pui32MemStatArray);
+ pui64MemStatArray);
#else
PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
#include "pvrsrv.h"
#include "pdump_km.h"
#include "info_page.h"
+#include "oskm_apphint.h"
#include "pvrsrv_sync_km.h"
#include "rgxhwperf.h"
/* Set the size of the sync checkpoint pool (not used if 0).
* A pool will be maintained for each sync checkpoint context.
+ * SYNC_CHECKPOINT_POOL_LIMIT must be a power of 2 (POT),
+ * as the pool wrap mask is calculated using it.
*/
-#if defined(PDUMP)
-#define SYNC_CHECKPOINT_POOL_SIZE 0
-#else
-#define SYNC_CHECKPOINT_POOL_SIZE 128
-#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1)
-#endif
+#define SYNC_CHECKPOINT_POOL_LIMIT 1024
/* The 'sediment' value represents the minimum number of
* sync checkpoints which must be in the pool before one
* just after it has been returned to the pool, making
* debugging somewhat easier to understand.
*/
+#if defined(PDUMP)
#define SYNC_CHECKPOINT_POOL_SEDIMENT 20
-
-#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0
-#error "SYNC_CHECKPOINT_POOL_SIZE must be power of 2."
+#else
+#define SYNC_CHECKPOINT_POOL_SEDIMENT 0
#endif
-#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10
+#if (SYNC_CHECKPOINT_POOL_LIMIT & (SYNC_CHECKPOINT_POOL_LIMIT - 1)) != 0
+#error "SYNC_CHECKPOINT_POOL_LIMIT must be power of 2."
+#endif
/*
This defines the maximum amount of synchronisation memory
*/
#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024)
-
-typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
-{
- IMG_UINT32 ui32BlockCount; /*!< Number of contexts in the list */
- IMG_UINT32 ui32BlockListSize; /*!< Size of the array contexts */
- SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
-} SYNC_CHECKPOINT_BLOCK_LIST;
-
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+/* Flags used to indicate state of pool */
+#define SYNC_CHECKPOINT_POOL_FULL (1)
+#define SYNC_CHECKPOINT_POOL_VALID (1 << 7)
+/* Pool state helpers. The parameter and the whole expansion are fully
+ * parenthesised so the macros expand safely inside larger expressions
+ * (e.g. !CHECKPOINT_POOL_VALID(x) or CHECKPOINT_POOL_FULL(x) == y),
+ * which the unparenthesised forms would mis-parse. */
+#define CHECKPOINT_POOL_FULL(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_FULL)
+#define CHECKPOINT_POOL_VALID(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags & SYNC_CHECKPOINT_POOL_VALID)
+#define SET_CHECKPOINT_POOL_FULL(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_FULL)
+#define SET_CHECKPOINT_POOL_VALID(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags |= SYNC_CHECKPOINT_POOL_VALID)
+#define CLEAR_CHECKPOINT_POOL_FULL(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_FULL)
+#define CLEAR_CHECKPOINT_POOL_VALID(ctxctl) \
+ ((ctxctl)->ui8PoolStateFlags &= ~SYNC_CHECKPOINT_POOL_VALID)
+#endif
struct _SYNC_CHECKPOINT_CONTEXT_CTL_
{
SHARED_DEV_CONNECTION psDeviceNode;
- PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
- PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
/*
* Used as head of linked-list of sync checkpoints for which
* SyncCheckpointFree() has been called, but have outstanding
/* Lock to protect the deferred cleanup list */
POS_SPINLOCK hDeferredCleanupListLock;
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
- SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
- IMG_BOOL bSyncCheckpointPoolFull;
- IMG_BOOL bSyncCheckpointPoolValid;
- IMG_UINT32 ui32SyncCheckpointPoolCount;
- IMG_UINT32 ui32SyncCheckpointPoolWp;
- IMG_UINT32 ui32SyncCheckpointPoolRp;
- POS_SPINLOCK hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */
+ /* Counters to provide stats for number of checkpoints used at any one time */
+ IMG_UINT32 ui32CurrentInUseSyncCheckpoints;
+ IMG_UINT32 ui32MaxInUseSyncCheckpoints;
+ /* Lock to protect the checkpoint stats */
+ POS_SPINLOCK hSyncCheckpointStatsLock;
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+ IMG_UINT32 ui32SyncCheckpointPoolSize; /*! Allocated size of the pool */
+ IMG_UINT32 ui32SyncCheckpointPoolCount; /*! Number of checkpoints currently in pool */
+ IMG_UINT32 ui32SyncCheckpointPoolWp; /*! Pool write pointer */
+ IMG_UINT32 ui32SyncCheckpointPoolRp; /*! Pool read pointer */
+ POS_SPINLOCK hSyncCheckpointPoolLock; /*! Lock to protect access to pool control data */
+ IMG_UINT8 ui8PoolStateFlags; /*! Flags to indicate state of pool */
+ /*! Array of SYNC_CHECKPOINTs. Must be last member in structure */
+ SYNC_CHECKPOINT *apsSyncCheckpointPool[1]; /*! The allocated checkpoint pool */
#endif
}; /*_SYNC_CHECKPOINT_CONTEXT_CTL is already typedef-ed in sync_checkpoint_internal.h */
+/* Sync checkpoint context. Checkpoint firmware objects are sub-allocated
+ * from psSubAllocRA; psSpanRA manages the spans imported to back those
+ * sub-allocations (see _SyncCheckpointBlockImport). Pool and statistics
+ * control data lives behind psContextCtl.
+ * NOTE(review): this tag is shared via a typedef in
+ * sync_checkpoint_internal.h - keep the two declarations in step. */
+struct SYNC_CHECKPOINT_CONTEXT_TAG
+{
+#if defined(PDUMP)
+ DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/
+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/
+ POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/
+#endif
+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
+ RA_ARENA *psSubAllocRA; /*!< RA context */
+ _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl;
+ ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */
+ ATOMIC_T hRefCount; /*!< Ref count for this context */
+}; /*_SYNC_CHECKPOINT_CONTEXT is already typedef-ed in sync_checkpoint_internal.h */
+
/* this is the max number of sync checkpoint records we will search or dump
* at any time.
*/
struct SYNC_CHECKPOINT_RECORD
{
- PVRSRV_DEVICE_NODE *psDevNode;
SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */
IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */
IMG_UINT32 ui32FwBlockAddr;
static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL;
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint);
static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
/* SyncCheckpointContextDestroy only when no longer referenced */
OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock);
psCtxCtl->hDeferredCleanupListLock = NULL;
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+
+ OSSpinLockDestroy(psCtxCtl->hSyncCheckpointStatsLock);
+ psCtxCtl->hSyncCheckpointStatsLock = NULL;
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
if (psCtxCtl->ui32SyncCheckpointPoolCount)
{
PVR_DPF((PVR_DBG_WARNING,
(void *) psContext,
psCtxCtl->ui32SyncCheckpointPoolCount));
}
- psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE;
+ CLEAR_CHECKPOINT_POOL_VALID(psCtxCtl);
OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock);
psCtxCtl->hSyncCheckpointPoolLock = NULL;
#endif
OSFreeMem(psContextInt->psContextCtl);
RA_Delete(psContextInt->psSpanRA);
RA_Delete(psContextInt->psSubAllocRA);
- OSLockDestroy(psContextInt->hLock);
- psContextInt->hLock = NULL;
+
+#if defined(PDUMP)
+ if (psContext->hSyncCheckpointBlockListLock != NULL)
+ {
+ OSLockDestroy(psContext->hSyncCheckpointBlockListLock);
+ }
+#endif
+
OSFreeMem(psContext);
}
}
psSyncBlk->psContext = psContext;
/* Allocate sync checkpoint block */
- psDevNode = psContext->psDevNode;
+ psDevNode = psContext->psContextCtl->psDeviceNode;
PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block);
- psSyncBlk->psDevNode = psDevNode;
-
eError = psDevNode->pfnAllocUFOBlock(psDevNode,
&psSyncBlk->hMemDesc,
&psSyncBlk->ui32FirmwareAddr,
OSLockAcquire(psSyncBlk->hLock);
if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
{
- PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncBlk->psContext;
+ PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode;
#if defined(PDUMP)
OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock);
_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
RA_LENGTH_T uSize,
RA_FLAGS_T uFlags,
+ RA_LENGTH_T uBaseAlignment,
const IMG_CHAR *pszAnnotation,
RA_BASE_T *puiBase,
RA_LENGTH_T *puiActualSize,
RA_LENGTH_T uiSpanSize;
PVRSRV_ERROR eError;
PVR_UNREFERENCED_PARAMETER(uFlags);
+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment);
PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena");
{
IMG_UINT64 ui64Temp;
- ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+ ui64Temp = (IMG_UINT64)psSyncInt->uiAllocatedAddr -
+ (IMG_UINT64)psSyncInt->psSyncCheckpointBlock->uiSpanBase;
PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
return (IMG_UINT32)ui64Temp;
}
psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags;
}
}
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
#endif
if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE)
psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags;
}
}
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
#endif
}
return eError;
return eError;
}
+/* Allocate one sync checkpoint for psContext.
+ * The host-side SYNC_CHECKPOINT structure comes from OSAllocMem and the
+ * firmware-visible object is sub-allocated from the context's sub-alloc RA.
+ * On success *ppsSyncCheckpoint is set and the context's checkpoint count
+ * is incremented; on failure nothing is leaked (the host structure is freed
+ * on the RA_Alloc error path). The caller is expected to finish initialising
+ * the checkpoint (refcount, timeline, name) - only allocation-related
+ * fields are set here. */
+static PVRSRV_ERROR
+_AllocSyncCheckpoint(_SYNC_CHECKPOINT_CONTEXT *psContext,
+ SYNC_CHECKPOINT **ppsSyncCheckpoint)
+{
+ SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+ PVRSRV_ERROR eError;
+
+ /* Allocate sync checkpoint */
+ psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+ PVR_LOG_RETURN_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem"); /* Sets OOM error code */
+
+ /* Sub-allocate the FW object from the context's RA; the returned
+  * per-span handle identifies the owning checkpoint block. */
+ eError = RA_Alloc(psContext->psSubAllocRA,
+ sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ sizeof(IMG_UINT32),
+ NULL,
+ &psNewSyncCheckpoint->uiAllocatedAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+
+ PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_ra_alloc);
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ __func__,
+ (void*)psContext->psSubAllocRA,
+ psNewSyncCheckpoint->uiAllocatedAddr));
+#endif
+ /* Derive the CPU-visible pointer to the FW object from the block's
+  * linear mapping plus this checkpoint's offset within the block. */
+ psNewSyncCheckpoint->psSyncCheckpointFwObj =
+ (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+ (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+ /* NOTE(review): the +1 presumably tags the FW address' LSB to mark it
+  * as a sync checkpoint (vs a sync prim) - confirm against the FW ABI. */
+ psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+ _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1;
+ OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+
+#if defined(DEBUG)
+ psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#endif
+ psNewSyncCheckpoint->sListNode.psPrevNode = NULL;
+ psNewSyncCheckpoint->sListNode.psNextNode = NULL;
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called to allocate new sync checkpoint<%p> for context<%p>",
+ __func__, (void*)psNewSyncCheckpoint, (void*)psContext));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s psSyncCheckpointFwObj<%p>",
+ __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s psSyncCheckpoint FwAddr=0x%x",
+ __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
+#endif
+ *ppsSyncCheckpoint = psNewSyncCheckpoint;
+ return PVRSRV_OK;
+
+fail_ra_alloc:
+ OSFreeMem(psNewSyncCheckpoint);
+ return eError;
+}
+
+/* Poisons and frees the checkpoint:
+ *  - clears the cached UFO address and FW-object pointer,
+ *  - (DEBUG builds) stamps the validation word with the FREED pattern,
+ *  - returns the FW-object sub-allocation to the context's sub-alloc RA,
+ *  - frees the host structure and decrements the context checkpoint count.
+ * psContext is cached up front because psSyncCheckpointBlock is NULLed
+ * before the final decrement. */
+static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+ psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
+ psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
+#if defined(DEBUG)
+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+#endif
+
+ RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+ psSyncCheckpoint->uiAllocatedAddr);
+ psSyncCheckpoint->psSyncCheckpointBlock = NULL;
+
+ OSFreeMem(psSyncCheckpoint);
+
+ OSAtomicDecrement(&psContext->hCheckpointCount);
+}
+
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+/* Pre-allocate ui32InitPoolSize sync checkpoints and place them in the
+ * context's pool so early allocations avoid RA/alloc latency.
+ * Returns an error (leaving any checkpoints already pooled in place) if a
+ * checkpoint allocation fails. If the pool refuses a checkpoint (pool full
+ * or invalid) the surplus checkpoint is freed and pre-population stops
+ * early - every subsequent _PutCheckpointInPool() would be refused for the
+ * same reason, so continuing would only alloc/free-churn. */
+static PVRSRV_ERROR
+_PrepopulateSyncCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext,
+ IMG_UINT32 ui32InitPoolSize)
+{
+ IMG_UINT32 ui32SyncCheckpoint;
+ SYNC_CHECKPOINT *psNewSyncCheckpoint;
+ PVRSRV_ERROR eError;
+
+ /* Allocate sync checkpoints and place in the pool */
+ for (ui32SyncCheckpoint=0; ui32SyncCheckpoint<ui32InitPoolSize; ui32SyncCheckpoint++)
+ {
+ eError = _AllocSyncCheckpoint(psContext, &psNewSyncCheckpoint);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_AllocSyncCheckpoint");
+
+ if (!_PutCheckpointInPool(psNewSyncCheckpoint))
+ {
+ /* Pool cannot accept the checkpoint - free it and stop. */
+ _FreeSyncCheckpoint(psNewSyncCheckpoint);
+ break;
+ }
+ }
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,"%s: Sync checkpoint pool [%d/%d]",
+ __func__, psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+ psContext->psContextCtl->ui32SyncCheckpointPoolSize));
+#endif
+ return PVRSRV_OK;
+}
+#endif /* if (SYNC_CHECKPOINT_POOL_LIMIT > 0) */
+
PVRSRV_ERROR
SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
_SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
_SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL;
PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_CHAR azTempName[PVRSRV_SYNC_NAME_LENGTH] = {0};
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+ void *pvAppHintState = NULL;
+ const IMG_UINT32 ui32DefaultMaxPoolLog2Size = 8;
+#if defined(PDUMP)
+ /* Pdumps start with an empty pool to avoid extra work allocating checkpoints which might not be used. */
+ const IMG_UINT32 ui32DefaultInitPoolLog2Size = 0;
+#else
+ const IMG_UINT32 ui32DefaultInitPoolLog2Size = 7;
+#endif
+ IMG_UINT32 ui32MaxPoolLog2Size;
+ IMG_UINT32 ui32InitPoolLog2Size;
+ IMG_UINT32 ui32InitPoolSize = 0;
+#endif
+ IMG_UINT32 ui32MaxPoolSize = 0;
PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL),
"ppsSyncCheckpointContext invalid",
PVRSRV_ERROR_INVALID_PARAMS);
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+ /* Read AppHints to determine the size of the sync checkpoint pool, if specified */
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolMaxLog2,
+ &ui32DefaultMaxPoolLog2Size, &ui32MaxPoolLog2Size);
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, SyncCheckpointPoolInitLog2,
+ &ui32DefaultInitPoolLog2Size, &ui32InitPoolLog2Size);
+ OSFreeKMAppHintState(pvAppHintState);
+
+ if (ui32MaxPoolLog2Size > 0)
+ {
+ ui32MaxPoolSize = 1 << ui32MaxPoolLog2Size;
+ if (ui32MaxPoolSize > SYNC_CHECKPOINT_POOL_LIMIT)
+ {
+ ui32MaxPoolSize = SYNC_CHECKPOINT_POOL_LIMIT;
+ }
+ }
+#endif
+
psContext = OSAllocMem(sizeof(*psContext));
PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */
- psContextCtl = OSAllocMem(sizeof(*psContextCtl));
+ /* psContextCtl includes the allocation for the sync checkpoint pool */
+ psContextCtl = OSAllocMem(sizeof(*psContextCtl) + (sizeof(SYNC_CHECKPOINT*) * ui32MaxPoolSize));
PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */
- eError = OSLockCreate(&psContext->hLock);
- PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock);
-
eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock);
PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock);
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock);
PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock);
#endif
dllist_init(&psContextCtl->sDeferredCleanupListHead);
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
psContextCtl->ui32SyncCheckpointPoolCount = 0;
psContextCtl->ui32SyncCheckpointPoolWp = 0;
psContextCtl->ui32SyncCheckpointPoolRp = 0;
- psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
- psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE;
+ psContextCtl->ui8PoolStateFlags = SYNC_CHECKPOINT_POOL_VALID;
#endif
- psContext->psDevNode = psDevNode;
-
- OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext);
- OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+ psContextCtl->psDeviceNode = (SHARED_DEV_CONNECTION)psDevNode;
/*
Create the RA for sub-allocations of the sync checkpoints
back the blocksize when it does the import which overrides
what we specify here.
*/
- psContext->psSubAllocRA = RA_Create(psContext->azName,
+ OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH,"Sync Prim RA-%p", psContext);
+ psContext->psSubAllocRA = RA_Create(azTempName,
/* Params for imports */
_Log2(sizeof(IMG_UINT32)),
RA_LOCKCLASS_2,
ensures that all are imports are added to the RA in a linear
fashion
*/
- psContext->psSpanRA = RA_Create(psContext->azSpanName,
+ OSSNPrintf(azTempName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+ psContext->psSpanRA = RA_Create(azTempName,
/* Params for imports */
0,
RA_LOCKCLASS_1,
#endif
- return PVRSRV_OK;
+ psContextCtl->ui32CurrentInUseSyncCheckpoints = 0;
+ psContextCtl->ui32MaxInUseSyncCheckpoints = 0;
+ eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointStatsLock);
+ PVR_GOTO_IF_ERROR(eError, fail_span_stat);
+
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+ /* Pre-populate the sync checkpoint pool, if specified */
+ psContextCtl->ui32SyncCheckpointPoolSize = ui32MaxPoolSize;
+ /* Ensure ui32MaxPoolSize is a POT and does not exceed SYNC_CHECKPOINT_POOL_LIMIT,
+ * and ui32InitPoolSize does not exceed ui32MaxPoolSize.
+ */
+ if (psContextCtl->ui32SyncCheckpointPoolSize > SYNC_CHECKPOINT_POOL_LIMIT)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolMaxLog2(%d) would exceed "
+ "SYNC_CHECKPOINT_POOL_LIMIT(%d) - limiting to %d",
+ __func__, ui32MaxPoolLog2Size,
+ SYNC_CHECKPOINT_POOL_LIMIT, SYNC_CHECKPOINT_POOL_LIMIT));
+ psContextCtl->ui32SyncCheckpointPoolSize = SYNC_CHECKPOINT_POOL_LIMIT;
+ }
+
+ if (ui32InitPoolLog2Size > 0)
+ {
+ ui32InitPoolSize = 1 << ui32InitPoolLog2Size;
+ }
+ if (ui32InitPoolSize > psContextCtl->ui32SyncCheckpointPoolSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: AppHint SyncCheckpointPoolInitLog2(%d) would exceed "
+ "ui32SyncCheckpointPoolSize(%d) - limiting to %d",
+ __func__, ui32InitPoolLog2Size,
+ psContextCtl->ui32SyncCheckpointPoolSize,
+ psContextCtl->ui32SyncCheckpointPoolSize));
+ ui32InitPoolSize = psContextCtl->ui32SyncCheckpointPoolSize;
+ }
+
+ if (ui32InitPoolSize > 0)
+ {
+ eError = _PrepopulateSyncCheckpointPool(psContext, ui32InitPoolSize);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "_PrepopulateSyncCheckpointPool(%d)", ui32InitPoolSize);
+ }
+#endif
+
+ return PVRSRV_OK;
+fail_span_stat:
+#if defined(PDUMP)
+ OSLockDestroy(psContext->hSyncCheckpointBlockListLock);
+ psContext->hSyncCheckpointBlockListLock = NULL;
+#endif
fail_span_add:
RA_Delete(psContext->psSpanRA);
fail_span:
RA_Delete(psContext->psSubAllocRA);
fail_suballoc:
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
psContextCtl->hSyncCheckpointPoolLock = NULL;
fail_create_pool_lock:
OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock);
psContextCtl->hDeferredCleanupListLock = NULL;
fail_create_deferred_cleanup_lock:
- OSLockDestroy(psContext->hLock);
- psContext->hLock = NULL;
-fail_create_context_lock:
OSFreeMem(psContextCtl);
fail_alloc2:
OSFreeMem(psContext);
return eError;
}
-/* Poisons and frees the checkpoint
- * Decrements context refcount. */
-static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint)
-{
- _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
-
- psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
- psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
- psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
-
- RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
- psSyncCheckpoint->uiSpanAddr);
- psSyncCheckpoint->psSyncCheckpointBlock = NULL;
-
- OSFreeMem(psSyncCheckpoint);
-
- OSAtomicDecrement(&psContext->hCheckpointCount);
-}
-
PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
{
PVRSRV_ERROR eError = PVRSRV_OK;
"psSyncCheckpointContext invalid",
PVRSRV_ERROR_INVALID_PARAMS);
- psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode;
#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING,
_CheckDeferredCleanupList(psContext);
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0)
{
IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext);
}
else
{
- IMG_INT iRf2 = 0;
-
- iRf2 = OSAtomicRead(&psContext->hRefCount);
SyncCheckpointContextUnref(psSyncCheckpointContext);
}
SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
_SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
PVRSRV_DEVICE_NODE *psDevNode;
+ OS_SPINLOCK_FLAGS uiFlags;
PVRSRV_ERROR eError;
PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
- psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode;
+ psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psContextCtl->psDeviceNode;
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool",
__func__));
/* If pool is empty (or not defined) alloc the new sync checkpoint */
if (!psNewSyncCheckpoint)
{
- psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+ eError = _AllocSyncCheckpoint(psSyncContextInt, &psNewSyncCheckpoint);
PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */
+ }
- eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
- sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
- RA_NO_IMPORT_MULTIPLIER,
- 0,
- sizeof(IMG_UINT32),
- (IMG_CHAR*)pszCheckpointName,
- &psNewSyncCheckpoint->uiSpanAddr,
- NULL,
- (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
- PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc);
-
-#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING,
- "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
- __func__,
- (void*)psSyncContextInt->psSubAllocRA,
- psNewSyncCheckpoint->uiSpanAddr));
-#endif
- psNewSyncCheckpoint->psSyncCheckpointFwObj =
- (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
- (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
- psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
- _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1;
- OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
- psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
-#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING,
- "%s called to allocate new sync checkpoint<%p> for context<%p>",
- __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext));
- PVR_DPF((PVR_DBG_WARNING,
- "%s psSyncCheckpointFwObj<%p>",
- __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
- PVR_DPF((PVR_DBG_WARNING,
- "%s psSyncCheckpoint FwAddr=0x%x",
- __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
- PVR_DPF((PVR_DBG_WARNING,
- "%s pszCheckpointName = %s",
- __func__, pszCheckpointName));
- PVR_DPF((PVR_DBG_WARNING,
- "%s psSyncCheckpoint Timeline=%d",
- __func__, hTimeline));
-#endif
+ OSSpinLockAcquire(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+ if (++psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints > psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints)
+ {
+ psSyncContextInt->psContextCtl->ui32MaxInUseSyncCheckpoints = psSyncContextInt->psContextCtl->ui32CurrentInUseSyncCheckpoints;
}
+ OSSpinLockRelease(psSyncContextInt->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
psNewSyncCheckpoint->hTimeline = hTimeline;
OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
}
}
- {
- OS_SPINLOCK_FLAGS uiFlags;
- /* Add the sync checkpoint to the device list */
- OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
- dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
- &psNewSyncCheckpoint->sListNode);
- OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
- }
+ /* Add the sync checkpoint to the device list */
+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+ dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
+ &psNewSyncCheckpoint->sListNode);
+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
*ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
#endif
return PVRSRV_OK;
-fail_raalloc:
- OSFreeMem(psNewSyncCheckpoint);
fail_alloc:
return eError;
}
{
_SYNC_CHECKPOINT_CONTEXT *psContext;
PVRSRV_DEVICE_NODE *psDevNode;
+ OS_SPINLOCK_FLAGS uiFlags;
psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
- psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psContextCtl->psDeviceNode;
/*
* Without this reference, the context may be destroyed as soon
*/
SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
+#if defined(DEBUG)
PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE);
+#endif
if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
{
PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
"%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..",
__func__));
#endif
+ PVRSRV_ERROR eError;
if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
&& psSyncCheckpointInt->hRecord)
{
- PVRSRV_ERROR eError;
/* remove this sync record */
eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
}
- {
- OS_SPINLOCK_FLAGS uiFlags;
- /* Remove the sync checkpoint from the global list */
- OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
- dllist_remove_node(&psSyncCheckpointInt->sListNode);
- OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
- }
+ /* Remove the sync checkpoint from the global list */
+ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
+ dllist_remove_node(&psSyncCheckpointInt->sListNode);
+ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags);
RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+ psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--;
+ OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
PVR_DPF((PVR_DBG_WARNING,
"%s attempting to return sync checkpoint to the pool",
if (!_PutCheckpointInPool(psSyncCheckpointInt))
#endif
{
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
PVR_DPF((PVR_DBG_WARNING,
"%s pool is full, so just free it",
#endif
#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING,
- "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32AllocatedAddr=0x%llx",
__func__,
psSyncCheckpointInt->ui32UID,
(void*)psSyncCheckpointInt,
(void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
- psSyncCheckpointInt->uiSpanAddr));
+ psSyncCheckpointInt->uiAllocatedAddr));
#endif
_FreeSyncCheckpoint(psSyncCheckpointInt);
}
PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+#if defined(DEBUG)
PVR_DPF((PVR_DBG_WARNING,
"%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x",
__func__,
(void*)psSyncCheckpoint,
(IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)),
psSyncCheckpointInt->ui32ValidationCheck));
+#else
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d",
+ __func__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpoint,
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount))));
+#endif
#endif
SyncCheckpointUnref(psSyncCheckpointInt);
}
if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
{
#if defined(SUPPORT_RGX)
- PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
#endif
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
-#if defined(PDUMP)
- _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags);
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+ _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags);
#endif
}
else
if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
{
#if defined(SUPPORT_RGX)
- PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE);
#endif
if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE)
{
#if defined(SUPPORT_RGX)
- PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
{
RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
-#if defined(PDUMP)
- _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags);
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+ _SyncCheckpointUpdatePDump(psContext->psContextCtl->psDeviceNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags);
#endif
}
}
if (psSyncCheckpointInt)
{
#if defined(SUPPORT_RGX)
- PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
#endif
if (psSyncCheckpointInt)
{
#if defined(SUPPORT_RGX)
- PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
#endif
{
SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
- PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint");
+ PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpointInt != NULL, "psSyncCheckpoint");
- if (psSyncCheckpointInt)
- {
-#if !defined(NO_HARDWARE)
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psSyncCheckpointInt);
+#else /* !defined(NO_HARDWARE) */
#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
- __func__,
- (void*)psSyncCheckpoint,
- OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
- OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
- psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+ __func__,
+ (void*)psSyncCheckpointInt,
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
#endif
- OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+ OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
#endif
- }
}
PRGXFWIF_UFO_ADDR*
{
SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
- PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
- if (psSyncCheckpointInt)
+#if defined(DEBUG)
+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+#endif
{
- if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
- {
- return &psSyncCheckpointInt->sCheckpointUFOAddr;
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
- __func__,
- (void*)psSyncCheckpoint,
- psSyncCheckpointInt->ui32ValidationCheck));
- }
+ return &psSyncCheckpointInt->sCheckpointUFOAddr;
+ }
+#if defined(DEBUG)
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+ __func__,
+ (void*)psSyncCheckpointInt,
+ psSyncCheckpointInt->ui32ValidationCheck));
}
+#endif
invalid_chkpt:
return NULL;
SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
IMG_UINT32 ui32Ret = 0;
- PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
- if (psSyncCheckpointInt)
+#if defined(DEBUG)
+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+#endif
{
- if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
- {
- ui32Ret = psSyncCheckpointInt->ui32FWAddr;
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
- __func__,
- (void*)psSyncCheckpoint,
- psSyncCheckpointInt->ui32ValidationCheck));
- }
+ ui32Ret = psSyncCheckpointInt->ui32FWAddr;
}
+#if defined(DEBUG)
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+ __func__,
+ (void*)psSyncCheckpointInt,
+ psSyncCheckpointInt->ui32ValidationCheck));
+ }
+#endif
invalid_chkpt:
return ui32Ret;
SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint)
{
SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
- IMG_UINT32 ui32Ret = 0;
+ IMG_UINT32 ui32Ret = 0U;
- PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
- if (psSyncCheckpointInt)
- {
#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING,
- "%s returning ID for sync checkpoint<%p>",
- __func__,
- (void*)psSyncCheckpointInt));
- PVR_DPF((PVR_DBG_WARNING,
- "%s (validationCheck=0x%x)",
- __func__,
- psSyncCheckpointInt->ui32ValidationCheck));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s returning ID for sync checkpoint<%p>",
+ __func__,
+ (void*)psSyncCheckpointInt));
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s (validationCheck=0x%x)",
+ __func__,
+ psSyncCheckpointInt->ui32ValidationCheck));
+#endif
#endif
- ui32Ret = psSyncCheckpointInt->ui32UID;
+ ui32Ret = psSyncCheckpointInt->ui32UID;
#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING,
- "%s (ui32UID=0x%x)",
- __func__,
- psSyncCheckpointInt->ui32UID));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s (ui32UID=0x%x)",
+ __func__,
+ psSyncCheckpointInt->ui32UID));
#endif
- }
- return ui32Ret;
invalid_chkpt:
- return 0;
+ return ui32Ret;
}
PVRSRV_TIMELINE
SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint)
{
SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint;
- PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE;
- PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpointInt != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
- if (psSyncCheckpointInt)
- {
- i32Ret = psSyncCheckpointInt->hTimeline;
- }
- return i32Ret;
+ return psSyncCheckpointInt->hTimeline;
invalid_chkpt:
return 0;
(void*)psSyncCheckpointInt));
#endif
/* Mark as errored */
- SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE);
+ SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE);
break;
}
}
DLLIST_NODE *psNode, *psNext;
OS_SPINLOCK_FLAGS uiFlags;
+ if (psDevNode->hSyncCheckpointContext == NULL) return;
+
if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM))
{
PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------");
+
+ OSSpinLockAcquire(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+ PVR_DUMPDEBUG_LOG("(SyncCP Counts: InUse:%d Max:%d)",
+ psDevNode->hSyncCheckpointContext->psContextCtl->ui32CurrentInUseSyncCheckpoints,
+ psDevNode->hSyncCheckpointContext->psContextCtl->ui32MaxInUseSyncCheckpoints);
+ OSSpinLockRelease(psDevNode->hSyncCheckpointContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+
OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags);
dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
{
const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt)
{
struct SYNC_CHECKPOINT_RECORD * psSyncRec;
- _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
- PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)hSyncCheckpointBlock->psContext;
+ PVRSRV_DEVICE_NODE *psDevNode = psContext->psContextCtl->psDeviceNode;
PVRSRV_ERROR eError = PVRSRV_OK;
PVR_RETURN_IF_INVALID_PARAM(phRecord);
psSyncRec = OSAllocMem(sizeof(*psSyncRec));
PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */
- psSyncRec->psDevNode = psDevNode;
psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
psSyncRec->ui32SyncOffset = ui32SyncOffset;
psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
{
struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = pSync->psSyncCheckpointBlock->psContext;
PVRSRV_DEVICE_NODE *psDevNode;
PVR_RETURN_IF_INVALID_PARAM(hRecord);
- psDevNode = pSync->psDevNode;
+ psDevNode = psContext->psContextCtl->psDeviceNode;
OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
OSLockRelease(psDevNode->hSyncCheckpointSignalLock);
}
+#if defined(PDUMP)
PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence)
{
PVRSRV_ERROR eError;
PSYNC_CHECKPOINT *apsCheckpoints = NULL;
SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
IMG_UINT32 i, uiNumCheckpoints = 0;
+ _SYNC_CHECKPOINT_CONTEXT *psContext;
#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP)
PVRSRV_RGXDEV_INFO *psDevInfo;
#endif
{
/* Flushing deferred fence signals to pdump */
psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0];
- MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode);
+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+ MISRHandler_PdumpDeferredSyncSignalPoster(psContext->psContextCtl->psDeviceNode);
}
for (i=0; i < uiNumCheckpoints; i++)
psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[i];
if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED)
{
- PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode,
+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+ PDUMPCOMMENTWITHFLAGS(psContext->psContextCtl->psDeviceNode,
psSyncCheckpoint->ui32PDumpFlags,
"Wait for Fence %s (ID:%d)",
psSyncCheckpoint->azName,
if (uiNumCheckpoints)
{
psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0];
- psDevInfo = psSyncCheckpoint->psSyncCheckpointBlock->psDevNode->pvDevice;
+ psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+ psDevInfo = psContext->psContextCtl->psDeviceNode->pvDevice;
if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER)
{
RGXValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL);
return PVRSRV_OK;
}
+#endif /* #if defined(PDUMP) */
static PVRSRV_ERROR
_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent)
static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext)
{
_SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
- PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode;
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psCtxCtl->psDeviceNode;
DECLARE_DLLIST(sCleanupList);
DLLIST_NODE *psNode, *psNext;
OS_SPINLOCK_FLAGS uiFlags;
+ PVRSRV_ERROR eError;
#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING, "%s called", __func__));
if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
&& psSyncCheckpointInt->hRecord)
{
- PVRSRV_ERROR eError;
/* remove this sync record */
eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr);
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ /* Unref the checkpoint in use */
+ OSSpinLockAcquire(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+ psContext->psContextCtl->ui32CurrentInUseSyncCheckpoints--;
+ OSSpinLockRelease(psContext->psContextCtl->hSyncCheckpointStatsLock, uiFlags);
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING,
"%s attempting to return sync(ID:%d),%p> to pool",
if (!_PutCheckpointInPool(psSyncCheckpointInt))
#endif
{
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it",
__func__));
#endif
#endif
#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
+ }
else
{
+#endif
PVR_DPF((PVR_DBG_WARNING,
"%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)",
__func__,
}
}
-#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (SYNC_CHECKPOINT_POOL_LIMIT > 0)
static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
{
_SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
OS_SPINLOCK_FLAGS uiFlags;
+ if (psCtxCtl->ui32SyncCheckpointPoolSize == 0)
+ {
+ goto pool_not_used;
+ }
+
/* Acquire sync checkpoint pool lock */
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
/* Check if we can allocate from the pool */
- if (psCtxCtl->bSyncCheckpointPoolValid &&
- (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) &&
- (psCtxCtl->ui32SyncCheckpointPoolWp != psCtxCtl->ui32SyncCheckpointPoolRp))
+ if (CHECKPOINT_POOL_VALID(psCtxCtl) &&
+ (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT))
{
/* Get the next sync checkpoint from the pool */
- psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
+ psSyncCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
psCtxCtl->ui32SyncCheckpointPoolRp =
- (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+ (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1);
psCtxCtl->ui32SyncCheckpointPoolCount--;
- psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+ CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl);
+#if defined(DEBUG)
psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#endif
#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING,
"%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, "
__func__,
psSyncCheckpoint->ui32UID,
psCtxCtl->ui32SyncCheckpointPoolCount,
- SYNC_CHECKPOINT_POOL_SIZE,
+ psCtxCtl->ui32SyncCheckpointPoolSize,
(void *) psContext,
psCtxCtl->ui32SyncCheckpointPoolRp,
psCtxCtl->ui32SyncCheckpointPoolWp));
/* Release sync checkpoint pool lock */
OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
+pool_not_used:
return psSyncCheckpoint;
}
IMG_BOOL bReturnedToPool = IMG_FALSE;
OS_SPINLOCK_FLAGS uiFlags;
+ if (psCtxCtl->ui32SyncCheckpointPoolSize == 0)
+ {
+ return IMG_FALSE;
+ }
+
/* Acquire sync checkpoint pool lock */
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
/* Check if pool has space */
- if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull)
+ if (CHECKPOINT_POOL_VALID(psCtxCtl) && !(CHECKPOINT_POOL_FULL(psCtxCtl)))
{
/* Put the sync checkpoint into the next write slot in the pool */
- psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint;
+ psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint;
psCtxCtl->ui32SyncCheckpointPoolWp =
- (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+ (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1);
psCtxCtl->ui32SyncCheckpointPoolCount++;
- psCtxCtl->bSyncCheckpointPoolFull =
- ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) &&
- (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp));
+ /* Update if the checkpoint that was just added filled up the pool */
+ if (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)
+ {
+ SET_CHECKPOINT_POOL_FULL(psCtxCtl);
+ }
bReturnedToPool = IMG_TRUE;
psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF;
+#if defined(DEBUG)
psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
+#endif
#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
PVR_DPF((PVR_DBG_WARNING,
"%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d",
__func__,
psSyncCheckpoint->ui32UID,
psCtxCtl->ui32SyncCheckpointPoolCount,
- SYNC_CHECKPOINT_POOL_SIZE,
+ psCtxCtl->ui32SyncCheckpointPoolSize,
psCtxCtl->ui32SyncCheckpointPoolRp,
psCtxCtl->ui32SyncCheckpointPoolWp));
#endif
DLLIST_NODE *psThis, *psNext;
OS_SPINLOCK_FLAGS uiFlags;
IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount;
- IMG_BOOL bPoolValid;
/* Acquire sync checkpoint pool lock */
OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
- bPoolValid = psCtxCtl->bSyncCheckpointPoolValid;
ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount;
/* While the pool still contains sync checkpoints, free them */
- while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0)
+ while (CHECKPOINT_POOL_VALID(psCtxCtl) && psCtxCtl->ui32SyncCheckpointPoolCount > 0)
{
/* Get the sync checkpoint from the next read slot in the pool */
- psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
+ psCheckpoint = psCtxCtl->apsSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
psCtxCtl->ui32SyncCheckpointPoolRp =
- (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK;
+ (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & (psCtxCtl->ui32SyncCheckpointPoolSize-1);
psCtxCtl->ui32SyncCheckpointPoolCount--;
- psCtxCtl->bSyncCheckpointPoolFull =
- ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) &&
- (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp));
+
+ CLEAR_CHECKPOINT_POOL_FULL(psCtxCtl);
if (psCheckpoint)
{
/* go through the local list and free all of the sync checkpoints */
#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
- PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, "
+ PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, ui8PoolStateFlags=0x%x, "
"uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext,
- bPoolValid, ui32PoolCount));
+ psCtxCtl->ui8PoolStateFlags, psCtxCtl->ui32SyncCheckpointPoolCount));
if (ui32NullScpCount > 0)
{
psCheckpoint = IMG_CONTAINER_OF(psThis, SYNC_CHECKPOINT, sListNode);
#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+#if defined(DEBUG)
if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL)
{
PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry "
"(ui32ValidationCheck=0x%x)", __func__,
psCheckpoint->ui32ValidationCheck));
}
-
+#endif
PVR_DPF((PVR_DBG_WARNING,
"%s psSyncCheckpoint(ID:%d)",
__func__, psCheckpoint->ui32UID));
+#if defined(DEBUG)
PVR_DPF((PVR_DBG_WARNING,
"%s psSyncCheckpoint->ui32ValidationCheck=0x%x",
__func__, psCheckpoint->ui32ValidationCheck));
+#endif
PVR_DPF((PVR_DBG_WARNING,
- "%s psSyncCheckpoint->uiSpanAddr=0x%llx",
- __func__, psCheckpoint->uiSpanAddr));
+ "%s psSyncCheckpoint->uiAllocatedAddr=0x%llx",
+ __func__, psCheckpoint->uiAllocatedAddr));
PVR_DPF((PVR_DBG_WARNING,
"%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>",
__func__, (void *) psCheckpoint->psSyncCheckpointBlock));
psCheckpoint->ui32UID,
(void *) psCheckpoint,
(void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
- psCheckpoint->uiSpanAddr));
+ psCheckpoint->uiAllocatedAddr));
#endif
dllist_remove_node(psThis);
return ui32ItemsFreed;
}
-#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */
+#endif /* (SYNC_CHECKPOINT_POOL_LIMIT > 0) */
eError = _SyncFbFenceImport(psImport, ppsFence);
- PVR_DPF_RETURN_OK;
err_out:
PVR_DPF_RETURN_RC(eError);
}
#include "sync.h"
#include "sync_internal.h"
#include "connection_server.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "rgxhwperf.h"
#include "info_page.h"
_SyncConnectionRef(psSyncConnectionData);
OSLockAcquire(psSyncConnectionData->hLock);
- if (psConnection != NULL)
- {
- dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
- }
+ dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
OSLockRelease(psSyncConnectionData->hLock);
psBlock->psSyncConnectionData = psSyncConnectionData;
}
PVR_DPF_RETURN_VAL(NULL);
}
-static inline IMG_BOOL IsDigit(IMG_CHAR c)
-{
- return c >= '0' && c <= '9';
-}
-
-static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
- IMG_UINT32 *pui32Number)
-{
- IMG_CHAR acTmp[11] = {0}; /* max 10 digits */
- IMG_UINT32 ui32Result;
- IMG_UINT i;
-
- for (i = 0; i < sizeof(acTmp) - 1; i++)
- {
- if (!IsDigit(*pszBuffer))
- break;
- acTmp[i] = *pszBuffer++;
- }
-
- /* if there are no digits or there is something after the number */
- if (i == 0 || *pszBuffer != '\0')
- return IMG_FALSE;
-
- if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
- return IMG_FALSE;
-
- *pui32Number = ui32Result;
-
- return IMG_TRUE;
-}
-
IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
IMG_UINT32 ui32Max)
PVRSRV_ERROR eError;
IMG_DEV_PHYADDR sDevPAddr;
VMM_PVZ_CONNECTION *psVmmPvz;
- IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP;
- PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+ PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM];
eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr);
#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
{
- /* Host expects PA rather than IPA address, so on the platforms where
- * IPA-PA translation is not done in hw, performs a software translation */
-
IMG_DEV_PHYADDR sDevPAddrTranslated;
+ /* If required, perform a software translation between CPU and Device physical addresses. */
PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr);
sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr;
}
psVmmPvz = PvzConnectionAcquire();
PvzClientLockAcquire();
- eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(uiFuncID,
- 0,
- RGX_FIRMWARE_RAW_HEAP_SIZE,
- sDevPAddr.uiAddr);
+ eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(RGX_FIRMWARE_RAW_HEAP_SIZE, sDevPAddr.uiAddr);
PvzClientLockRelease();
PvzConnectionRelease(psVmmPvz);
PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
{
PVRSRV_ERROR eError;
- IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP;
VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire();
PVR_ASSERT(psVmmPvz);
PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap);
- eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(uiFuncID, 0);
+ eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap();
PvzClientLockRelease();
PvzConnectionRelease(psVmmPvz);
OSLockRelease(psPVRSRVData->hPvzConnectionLock);
}
+#define VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID) do { \
+ if ((ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED) || \
+ (ui32DriverID < RGXFW_GUEST_DRIVER_ID_START)) \
+ { \
+ PVR_DPF((PVR_DBG_ERROR, \
+ "%s: Invalid OSID %u. Supported Guest OSID range: %u - %u", \
+ __func__, \
+ ui32DriverID, \
+ RGXFW_GUEST_DRIVER_ID_START, \
+ RGX_NUM_DRIVERS_SUPPORTED-1)); \
+ return PVRSRV_ERROR_INVALID_PARAMS; \
+ } \
+ if (PVRSRVGetDeviceInstance(ui32DevID) == NULL) \
+ { \
+ PVR_DPF((PVR_DBG_ERROR, \
+ "%s: Invalid Device ID %u.", \
+ __func__, \
+ ui32DevID)); \
+ return PVRSRV_ERROR_INVALID_PARAMS; \
+ } \
+} while (false);
/*
* ===========================================================
*/
PVRSRV_ERROR
-PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID,
IMG_UINT64 ui64Size,
IMG_UINT64 ui64PAddr)
* preallocate the Guest's firmware heaps from static carveout memory.
*/
PVR_DPF((PVR_DBG_ERROR,
- "%s: Host PVZ config: Does not match with Guest PVZ config\n"
- " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__));
+ "%s: Host PVZ config: Does not match with Guest PVZ config."
+ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.", __func__));
return PVRSRV_ERROR_INVALID_PVZ_CONFIG;
#else
PVRSRV_ERROR eError = PVRSRV_OK;
- PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS);
-
- if (ui32FuncID != PVZ_BRIDGE_MAPDEVICEPHYSHEAP)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d",
- __func__,
- ui32OSID,
- (IMG_UINT32)PVZ_BRIDGE_MAPDEVICEPHYSHEAP,
- ui32FuncID));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID);
PvzServerLockAcquire();
#if defined(SUPPORT_RGX)
- if (IsVmOnline(ui32OSID))
+ if (IsVmOnline(ui32DriverID, ui32DevID))
{
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
- PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID);
IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr};
IMG_UINT32 sync;
- eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size);
+ eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32DriverID, sDevPAddr, ui64Size);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0);
/* Invalidate MMU cache in preparation for a kick from this Guest */
PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0);
/* Everything is ready for the firmware to start interacting with this OS */
- eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE);
+ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE);
}
e0:
#endif /* defined(SUPPORT_RGX) */
}
PVRSRV_ERROR
-PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID)
{
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
#else
PVRSRV_ERROR eError = PVRSRV_OK;
- PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS);
-
- if (ui32FuncID != PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d",
- __func__,
- ui32OSID,
- (IMG_UINT32)PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP,
- ui32FuncID));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID);
PvzServerLockAcquire();
#if defined(SUPPORT_RGX)
- if (IsVmOnline(ui32OSID))
+ if (IsVmOnline(ui32DriverID, ui32DevID))
{
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
- PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstance(ui32DevID);
/* Order firmware to offload this OS' data and stop accepting commands from it */
- eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE);
+ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32DriverID, RGXFWIF_OS_OFFLINE);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0);
/* it is now safe to remove the Guest's memory mappings */
- RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID);
}
e0:
#endif
*/
PVRSRV_ERROR
-PvzServerOnVmOnline(IMG_UINT32 ui32OSID)
+PvzServerOnVmOnline(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID)
{
PVRSRV_ERROR eError;
+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID);
PvzServerLockAcquire();
-
- eError = PvzOnVmOnline(ui32OSID);
-
+ eError = PvzOnVmOnline(ui32DriverID, ui32DevID);
PvzServerLockRelease();
return eError;
}
PVRSRV_ERROR
-PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
+PvzServerOnVmOffline(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID)
{
PVRSRV_ERROR eError;
+ VALIDATE_DRVID_DEVID(ui32DriverID, ui32DevID);
PvzServerLockAcquire();
-
- eError = PvzOnVmOffline(ui32OSID);
-
+ eError = PvzOnVmOffline(ui32DriverID, ui32DevID);
PvzServerLockRelease();
return eError;
}
PVRSRV_ERROR
-PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+ IMG_UINT32 ui32ParamValue,
+ IMG_UINT32 ui32DevID)
{
PVRSRV_ERROR eError;
+ VALIDATE_DRVID_DEVID(RGXFW_GUEST_DRIVER_ID_START, ui32DevID);
PvzServerLockAcquire();
-
- eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue);
-
+ eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue, ui32DevID);
PvzServerLockRelease();
return eError;
#include "pvrsrv.h"
#include "vz_vmm_pvz.h"
-#if (RGX_NUM_OS_SUPPORTED > 1)
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
static PVRSRV_ERROR
-PvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig)
+PvzConnectionValidate(void)
{
VMM_PVZ_CONNECTION *psVmmPvz;
PVRSRV_ERROR eError = PVRSRV_OK;
*
* This setup uses carve-out memory, has no hypercall mechanism & does not support
* out-of-order initialisation of host/guest VMs/drivers. The host driver has all
- * the information needed to initialize all OSIDs firmware state when it's loaded
- * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ
+ * the information needed to initialize all Drivers firmware state when it's loaded
+ * and its PVZ layer must mark all guest Drivers as being online as part of its PVZ
* initialisation. Having no out-of-order initialisation support, the guest driver
* can only submit a workload to the device after the host driver has completely
* initialized the firmware, the VZ hypervisor/VM setup must guarantee this.
*
* This setup uses guest memory, has PVZ hypercall mechanism & supports out-of-order
* initialisation of host/guest VMs/drivers. The host driver initializes only its
- * own OSID-0 firmware state when its loaded and each guest driver will use its PVZ
+ * own Driver-0 firmware state when its loaded and each guest driver will use its PVZ
* interface to hypercall to the host driver to both synchronise its initialisation
* so it does not submit any workload to the firmware before the host driver has
- * had a chance to initialize the firmware and to also initialize its own OSID-x
+ * had a chance to initialize the firmware and to also initialize its own Driver-x
* firmware state.
*/
PVR_LOG(("Using dynamic PVZ bootstrap setup"));
e0:
return eError;
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
-PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+PVRSRV_ERROR PvzConnectionInit(void)
{
PVRSRV_ERROR eError;
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
-#if (RGX_NUM_OS_SUPPORTED == 1)
-# if !defined(PVRSRV_NEED_PVR_DPF)
+#if (RGX_NUM_DRIVERS_SUPPORTED == 1)
+#if !defined(PVRSRV_NEED_PVR_DPF)
PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
# endif
- PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1"));
+ PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_DRIVERS_SUPPORTED > 1"));
PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode",
psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
eError = PVRSRV_ERROR_NOT_SUPPORTED;
goto e0;
#else
+ if ((psPVRSRVData->hPvzConnection != NULL) &&
+ (psPVRSRVData->hPvzConnectionLock != NULL))
+ {
+ eError = PVRSRV_OK;
+ PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already initialised."));
+ goto e0;
+ }
+
/* Create para-virtualization connection lock */
eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock);
PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
}
/* Ensure pvz connection is configured correctly */
- eError = PvzConnectionValidate(psDevConfig);
+ eError = PvzConnectionValidate();
PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate");
-
- psPVRSRVData->abVmOnline[RGXFW_HOST_OS] = IMG_TRUE;
#endif
e0:
return eError;
{
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ if ((psPVRSRVData->hPvzConnection == NULL) &&
+ (psPVRSRVData->hPvzConnectionLock == NULL))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PVzConnection already deinitialised."));
+ return;
+ }
+
VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
psPVRSRVData->hPvzConnection = NULL;
#include "pvrsrv_error.h"
#include "vz_vm.h"
#include "rgxfwutils.h"
+#include "rgxfwdbg.h"
-bool IsVmOnline(IMG_UINT32 ui32OSID)
+bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID)
{
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetDeviceInstance(ui32DevID);
- return (ui32OSID >= RGX_NUM_OS_SUPPORTED) ? (false) : (psPVRSRVData->abVmOnline[ui32OSID]);
+ if (psDevNode == NULL)
+ {
+ return false;
+ }
+ else
+ {
+ return BIT_ISSET(psDevNode->ui32VmState, ui32DriverID);
+ }
}
-PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid)
+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID)
{
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED == 1)
PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
#else
PVRSRV_ERROR eError = PVRSRV_OK;
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_DEVICE_NODE *psDevNode;
- if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: invalid OSID (%d)",
- __func__, ui32OSid));
+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID);
- eError = PVRSRV_ERROR_INVALID_PARAMS;
+ if (psDevNode == NULL)
+ {
+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND;
goto e0;
}
-
- if (psPVRSRVData->abVmOnline[ui32OSid])
+ else if (BIT_ISSET(psDevNode->ui32VmState, ui32DriverID))
{
PVR_DPF((PVR_DBG_ERROR,
- "%s: OSID %d is already enabled.",
- __func__, ui32OSid));
+ "%s: DriverID %u on Device %u is already enabled.",
+ __func__, ui32DriverID, ui32DevID));
eError = PVRSRV_ERROR_INVALID_PARAMS;
goto e0;
}
- /* For now, limit support to single device setups */
- psDevNode = psPVRSRVData->psDeviceNodeList;
-
- if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_CREATED)
{
-
/* Firmware not initialized yet, do it here */
eError = PVRSRVCommonDeviceInitialise(psDevNode);
if (eError != PVRSRV_OK)
goto e0;
}
- psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
+ BIT_SET(psDevNode->ui32VmState, ui32DriverID);
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
/* Everything is ready for the firmware to start interacting with this OS */
- eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32OSid, RGXFWIF_OS_ONLINE);
+ eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32DriverID, RGXFWIF_OS_ONLINE);
#endif
e0:
return eError;
}
-PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid)
+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID)
{
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED == 1)
PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
#else
PVRSRV_ERROR eError = PVRSRV_OK;
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_DEVICE_NODE *psDevNode;
PVRSRV_RGXDEV_INFO *psDevInfo;
- if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID);
+
+ if (psDevNode == NULL)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: invalid OSID (%d)",
- __func__, ui32OSid));
- eError = PVRSRV_ERROR_INVALID_PARAMS;
+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND;
goto e0;
}
-
- if (!psPVRSRVData->abVmOnline[ui32OSid])
+ else if (!BIT_ISSET(psDevNode->ui32VmState, ui32DriverID))
{
PVR_DPF((PVR_DBG_ERROR,
- "%s: OSID %d is already disabled.",
- __func__, ui32OSid));
+ "%s: DriverID %u on Device %u is already disabled.",
+ __func__, ui32DriverID, ui32DevID));
eError = PVRSRV_ERROR_INVALID_PARAMS;
goto e0;
}
- /* For now, limit support to single device setups */
- psDevNode = psPVRSRVData->psDeviceNodeList;
psDevInfo = psDevNode->pvDevice;
+ if (psDevInfo == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_DEVINFO;
+ goto e0;
+ }
- eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+ eError = RGXFWSetFwOsState(psDevInfo, ui32DriverID, RGXFWIF_OS_OFFLINE);
if (eError == PVRSRV_OK)
{
- psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
+ BIT_UNSET(psDevNode->ui32VmState, ui32DriverID);
}
e0:
return eError;
}
-PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+ IMG_UINT32 ui32ParamValue,
+ IMG_UINT32 ui32DevID)
{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
- PVRSRV_DEVICE_NODE *psDeviceNode;
+#if defined(SUPPORT_RGX)
+ PVRSRV_DEVICE_NODE *psDevNode;
PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+
+ psDevNode = PVRSRVGetDeviceInstance(ui32DevID);
+ if (psDevNode == NULL)
+ {
+ eError = PVRSRV_ERROR_NO_DEVICENODE_FOUND;
+ goto e0;
+ }
- psDeviceNode = psPVRSRVData->psDeviceNodeList;
- psDevInfo = psDeviceNode->pvDevice;
+ psDevInfo = psDevNode->pvDevice;
+ if (psDevInfo == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_DEVINFO;
+ goto e0;
+ }
switch (eVMMParamType)
{
-#if defined(SUPPORT_RGX)
- case VMM_CONF_PRIO_OSID0:
- case VMM_CONF_PRIO_OSID1:
- case VMM_CONF_PRIO_OSID2:
- case VMM_CONF_PRIO_OSID3:
- case VMM_CONF_PRIO_OSID4:
- case VMM_CONF_PRIO_OSID5:
- case VMM_CONF_PRIO_OSID6:
- case VMM_CONF_PRIO_OSID7:
+ case VMM_CONF_PRIO_DRV0:
+ case VMM_CONF_PRIO_DRV1:
+ case VMM_CONF_PRIO_DRV2:
+ case VMM_CONF_PRIO_DRV3:
+ case VMM_CONF_PRIO_DRV4:
+ case VMM_CONF_PRIO_DRV5:
+ case VMM_CONF_PRIO_DRV6:
+ case VMM_CONF_PRIO_DRV7:
{
- IMG_UINT32 ui32OSid = eVMMParamType;
+ IMG_UINT32 ui32DriverID = eVMMParamType;
IMG_UINT32 ui32Prio = ui32ParamValue;
- if (ui32OSid < RGX_NUM_OS_SUPPORTED)
+ if (ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED)
{
- eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
+ eError = PVRSRVRGXFWDebugSetDriverPriorityKM(NULL, psDevNode, ui32DriverID, ui32Prio);
}
else
{
eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
break;
}
-#else
- PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
-#endif
default:
{
eError = PVRSRV_ERROR_INVALID_PARAMS;
}
}
+e0:
return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(eVMMParamType);
+ PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+#endif
}
/******************************************************************************
PVRSRV_DEVICE_NODE * psDeviceNode,
IMG_HANDLE hMemCtxPrivData,
RGXFWIF_DM eFWDataMaster,
+ IMG_UINT64 ui64TempSpillingAddr,
IMG_UINT32 ui32BPAddr,
IMG_UINT32 ui32HandlerAddr,
IMG_UINT32 ui32DataMaster)
sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+ sBPCmd.uCmdData.sBPData.ui64SpillAddr = ui64TempSpillingAddr;
sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE;
sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster;
PVRSRV_DEVICE_NODE * psDeviceNode,
IMG_HANDLE hMemCtxPrivData,
RGXFWIF_DM eFWDataMaster,
+ IMG_UINT64 ui64TempSpillingAddr,
IMG_UINT32 ui32BPAddr,
IMG_UINT32 ui32HandlerAddr,
IMG_UINT32 ui32DataMaster);
#include "pdump_km.h"
#include "rgx_compat_bvnc.h"
+#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT16 (RGX_FEATURE_VALUE_TYPE_UINT16 >> RGX_FEATURE_TYPE_BIT_SHIFT)
+#define RGX_FEATURE_TRUE_VALUE_TYPE_UINT32 (RGX_FEATURE_VALUE_TYPE_UINT32 >> RGX_FEATURE_TYPE_BIT_SHIFT)
#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1)
/* This function searches the given array for a given search value */
sizeof((t)[0])/sizeof(IMG_UINT64)) )
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN)
#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \
if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \
}
#endif
+#if !defined(ERNSBRNS_IDS_MAX_IDX) && !defined(FEATURE_NO_VALUES_NAMES_MAX_IDX)
+ PVR_UNREFERENCED_PARAMETER(ui64Mask);
+ PVR_UNREFERENCED_PARAMETER(ui32IdOrNameIdx);
+#endif
+
}
#endif
-static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg)
+static PVRSRV_ERROR _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT64 *pui64Cfg)
{
IMG_UINT32 ui32Index;
/* Read the feature values for the runtime BVNC */
for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++)
{
- IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index];
- IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64];
- IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64);
+ IMG_UINT16 ui16BitPosition = aui16FeaturesWithValuesBitPositions[ui32Index];
+ IMG_UINT64 ui64PackedValues = pui64Cfg[2 + ui16BitPosition / 64];
+ IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (ui16BitPosition % 64);
- if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index])
- {
- if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED)
- {
- psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED;
- }
- else
- {
- psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex];
- }
- }
- else
+ if (ui16ValueIndex >= gaFeaturesValuesMaxIndexes[ui32Index])
{
/* This case should never be reached */
psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID;
PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex));
PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]);
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+
+ switch (ui16BitPosition >> RGX_FEATURE_TYPE_BIT_SHIFT)
+ {
+ case RGX_FEATURE_TRUE_VALUE_TYPE_UINT16:
+ {
+ IMG_UINT16 *pui16FeatureValues = (IMG_UINT16*)gaFeaturesValues[ui32Index];
+ if (pui16FeatureValues[ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED)
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] =
+ RGX_FEATURE_VALUE_DISABLED;
+ }
+ else
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] =
+ pui16FeatureValues[ui16ValueIndex];
+ }
+ break;
+ }
+ case RGX_FEATURE_TRUE_VALUE_TYPE_UINT32:
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] =
+ ((IMG_UINT32*)gaFeaturesValues[ui32Index])[ui16ValueIndex];
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Feature with index %d has invalid feature type",
+ __func__,
+ ui32Index));
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
}
}
psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID;
PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__));
PVR_ASSERT(0);
+ return PVRSRV_ERROR_FEATURE_DISABLED;
}
if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) &&
psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID;
PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
PVR_ASSERT(0);
+ return PVRSRV_ERROR_FEATURE_DISABLED;
}
#else /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */
- /* Code path for Rogue and Oceanic */
+ /* Code path for Rogue */
psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1;
#if defined(SUPPORT_AGP)
psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1);
#endif
- /* Meta feature not present in oceanic */
-#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
{
psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED;
}
-#endif
/* Get the max number of dusts in the core */
if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID;
PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
PVR_ASSERT(0);
+ return PVRSRV_ERROR_FEATURE_DISABLED;
}
#endif /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */
- /* Meta feature not present in oceanic */
-#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX)
/* Transform the META coremem size info in bytes */
if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
{
psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024;
}
-#endif
+
+ return PVRSRV_OK;
}
static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount)
IMG_UINT32 *pC,
const IMG_UINT32 ui32RGXDevCount)
{
- unsigned int ui32ScanCount = 0;
IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX];
+ IMG_CHAR *pcTemp, *pcNext;
aszBVNCString[0] = '\0';
}
/* Parse the given RGX_BVNC string */
- ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC);
- if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+ pcTemp = &aszBVNCString[0];
+ pcNext = strchr(pcTemp, '.');
+ if (pcNext == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+
+ *pcNext = '\0';
+ if (OSStringToUINT32(pcTemp, 0, pB) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ pcTemp = pcNext+1;
+ /* remove any 'p' from the V string, as this will
+ * cause the call to OSStringToUINT32 to fail
+ */
+ pcNext = strchr(pcTemp, 'p');
+ if (pcNext)
{
- ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC);
+ /* found one- - changing to '\0' */
+ *pcNext = '\0';
+ /* Move to next '.' */
+ pcNext++;
}
- if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+ else
+ {
+ /* none found, so find next '.' and change to '\0' */
+ pcNext = strchr(pcTemp, '.');
+ if (pcNext == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ *pcNext = '\0';
+ }
+ if (OSStringToUINT32(pcTemp, 0, pV) != PVRSRV_OK)
{
return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
}
- PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString));
+ pcTemp = pcNext+1;
+ pcNext = strchr(pcTemp, '.');
+ if (pcNext == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ *pcNext = '\0';
+ if (OSStringToUINT32(pcTemp, 0, pN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ pcTemp = pcNext+1;
+ if (OSStringToUINT32(pcTemp, 0, pC) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ PVR_LOG(("BVNC module parameter honoured: %d.%d.%d.%d", *pB, *pV, *pN, *pC));
return PVRSRV_OK;
}
IMG_UINT64 ui64SLCSize = 0ULL;
#if defined(RGX_CR_SLC_SIZE_IN_KB)
- /* Rogue and Oceanic hardware */
+ /* Rogue hardware */
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE))
{
ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB);
#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C)
if (NULL == pui64Cfg)
{
+ IMG_CHAR acVStr[5] = RGX_BVNC_KM_V_ST;
+
/* We reach here if the HW is not present,
* or we are running in a guest OS with no COREID_PER_OS feature,
* or HW is unstable during register read giving invalid values,
B = RGX_BVNC_KM_B;
N = RGX_BVNC_KM_N;
C = RGX_BVNC_KM_C;
+
+ /* Clear any 'p' that may have been in RGX_BVNC_KM_V_ST,
+ * as OSStringToUINT32() will otherwise return an error.
+ */
+ if (acVStr[strlen(acVStr)-1] == 'p')
{
- IMG_UINT32 ui32ScanCount = 0;
- ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V);
- if (1 != ui32ScanCount)
- {
- ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V);
- if (1 != ui32ScanCount)
- {
- V = 0;
- }
- }
+ acVStr[strlen(acVStr)-1] = '\0';
+ }
+
+ if (OSStringToUINT32(&acVStr[0], 0, &V) != PVRSRV_OK)
+ {
+ V = 0;
}
PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM));
/* Parsing feature config depends on available features on the core
* hence this parsing should always follow the above feature assignment */
psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
- _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg);
+ eError = _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg);
+ PVR_RETURN_IF_ERROR(eError);
/* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */
ui64BVNC = BVNC_PACK(B,V,N,C);
ui32RGXDevCnt++;
-#if defined(DEBUG)
+#if defined(DEBUG) || defined(SUPPORT_PERFORMANCE_RUN)
_RGXBvncDumpParsedConfig(psDeviceNode);
#endif
return PVRSRV_OK;
#include "allocmem.h"
#include "devicemem.h"
#include "rgxfwutils.h"
+
#include "osfunc.h"
#include "rgxccb.h"
#include "rgx_memallocflags.h"
if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN))
{
PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
- PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap);
+ PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap);
psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize;
- /*
- * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks
- * because another ui32NumVirtChunks/2 is already allocated.
- * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed.
- */
- psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32));
- if (psClientCCB->pui32MappingTable == NULL)
- {
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_mtable;
- }
- for (i = 0; i < ui32NumChunks; i++)
- {
- psClientCCB->pui32MappingTable[i] = i;
- }
-
- if (eHeapType == PHYS_HEAP_TYPE_LMA ||
- eHeapType == PHYS_HEAP_TYPE_DMA)
+ if (uiHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG)
{
+ psClientCCB->pui32MappingTable = NULL;
/*
- * On LMA sparse memory can't be mapped to kernel.
+ * On LMA sparse memory can't be mapped to kernel without support for non physically
+ * sparse allocations.
* To work around this whole ccb memory is allocated at once as contiguous.
*/
eError = DevmemFwAllocate(psDevInfo,
}
else
{
+ /*
+ * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks
+ * because another ui32NumVirtChunks/2 is already allocated.
+ * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed.
+ */
+ psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32));
+ if (psClientCCB->pui32MappingTable == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_mtable;
+ }
+
+ for (i = 0; i < ui32NumChunks; i++)
+ {
+ psClientCCB->pui32MappingTable[i] = i;
+ }
+
eError = DevmemFwAllocateSparse(psDevInfo,
ui32VirtualAllocSize,
- ui32ChunkSize,
ui32NumChunks,
ui32NumVirtChunks,
psClientCCB->pui32MappingTable,
DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
#if defined(PVRSRV_ENABLE_CCCB_GROW)
fail_alloc_ccb:
- if ( psClientCCB->ui32VirtualAllocSize > 0)
+ if (psClientCCB->pui32MappingTable)
{
OSFreeMem(psClientCCB->pui32MappingTable);
}
{
/* Grow CCB */
PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
- PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap);
+ PHYS_HEAP_POLICY uiHeapPolicy = PhysHeapGetPolicy(psPhysHeap);
PVRSRV_ERROR eErr = PVRSRV_OK;
/* Something went wrong if we are here a second time */
OSLockAcquire(psClientCCB->hCCBGrowLock);
/*
- * On LMA sparse memory can't be mapped to kernel.
+ * On LMA sparse memory can't be mapped to kernel without support for non physically
+ * sparse allocations.
* To work around this whole ccb memory was allocated at once as contiguous.
* In such case below sparse change is not needed because memory is already allocated.
*/
- if (eHeapType != PHYS_HEAP_TYPE_LMA &&
- eHeapType != PHYS_HEAP_TYPE_DMA)
+ if (uiHeapPolicy == PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG)
{
IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize;
if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
{
- /* If an UPDATE then record the values incase an adjacent fence uses it. */
+ /* If an UPDATE then record the values in case an adjacent fence uses it. */
IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));
psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
#if defined(SUPPORT_AGP)
psClientCCB->psClientCCBCtrl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset;
+#if defined(SUPPORT_AGP4)
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset;
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset;
+#endif
#endif
#endif
return psClientCCB->ui32Size-1;
}
-PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
- IMG_UINT32 ui32Flags)
+void RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32Flags)
{
if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR))
{
{
BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED);
}
- return PVRSRV_OK;
}
void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo,
if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
{
psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
- + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN);
}
if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
{
psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
- + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ + PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN);
}
if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Workload Data added */
- psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Workload Data added */
+ psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+ }
#endif
}
psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
- psHeader->sWorkEstKickData.ui64Deadline = 0;
- psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ }
#endif
pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
- psHeader->sWorkEstKickData.ui64Deadline = 0;
- psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ }
#endif
pui64FBSCInvalCmdData = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER));
*pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask;
psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- if (psCmdHelperData->psWorkEstKickData != NULL &&
- psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL)
- {
- PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM ||
- psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D ||
- psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM ||
- psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM);
- psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
- }
- else
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
- psHeader->sWorkEstKickData.ui64Deadline = 0;
- psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ if (psCmdHelperData->psWorkEstKickData != NULL &&
+ RGXIsValidWorkloadEstCCBCommand(psCmdHelperData->eType))
+ {
+ psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+ }
+ else
+ {
+ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ }
}
#endif
psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
- psHeader->sWorkEstKickData.ui64Deadline = 0;
- psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ }
#endif
pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
- psHeader->sWorkEstKickData.ui64Deadline = 0;
- psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
+ }
#endif
pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
for (i=0;i<ui32CmdCount;i++)
{
RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
-#if defined(PDUMP)
+#if defined(PDUMP) || defined(__linux__)
PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
#endif
-#if (!defined(__linux__) || !defined(SUPPORT_RGX)) && !defined(PDUMP)
+#if (!defined(__linux__) || !defined(PDUMP))
PVR_UNREFERENCED_PARAMETER(psCmdHelperData);
#endif
-#if defined(__linux__) && defined(SUPPORT_RGX)
+#if defined(__linux__)
if (bTraceChecks)
{
trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
pcszDMName,
+ psDevInfo->psDeviceNode->sDevId.ui32InternalID,
ui32CtxAddr,
psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
psCmdHelperData->ui32ClientFenceCount,
{
trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
pcszDMName,
+ psDevInfo->psDeviceNode->sDevId.ui32InternalID,
ui32CtxAddr,
psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
psCmdHelperData->ui32ClientUpdateCount,
#endif
}
-void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+void DumpFirstCCBCmd(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
RGX_CLIENT_CCB *psCurrentClientCCB,
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile)
{
volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB;
- volatile void *pvPtr;
IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
- pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff);
-
if ((ui32SampledRdOff == ui32SampledDepOff) &&
(ui32SampledRdOff != ui32SampledWrOff))
{
IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB);
-PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
- IMG_UINT32 ui32Flags);
+void RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32Flags);
void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo,
IMG_UINT64 ui64FBSCEntryMask,
IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
-void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+void DumpFirstCCBCmd(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
RGX_CLIENT_CCB *psCurrentClientCCB,
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile);
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX Compute routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Compute routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "htbserver.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#include "rgxtimerquery.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP 0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ DEVMEM_MEMDESC *psFWComputeContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ DEVMEM_MEMDESC *psFWComputeContextStateMemDesc;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ POS_LOCK hLock;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WORKEST_HOST_DATA sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_INT32 i32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pbyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_UINT32 ui32StaticComputeContextStateSize,
+ IMG_PBYTE pStaticComputeContextState,
+ IMG_UINT32 ui32PackedCCBSizeU88,
+ IMG_UINT32 ui32ContextFlags,
+ IMG_UINT64 ui64RobustnessAddress,
+ IMG_UINT32 ui32MaxDeadlineMS,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext;
+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext;
+ IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
+
+ /* Prepare cleanup struct */
+ *ppsComputeContext = NULL;
+
+ psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+ if (psComputeContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /*
+ Create the FW compute context, this has the CDM common
+ context embedded within it
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWCOMPUTECONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwComputeContext",
+ &psComputeContext->psFWComputeContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fwcomputecontext;
+ }
+
+ eError = OSLockCreate(&psComputeContext->hLock);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_createlock;
+ }
+
+ psComputeContext->psDeviceNode = psDeviceNode;
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_COMPUTECTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwComputeContextState",
+ &psComputeContext->psFWComputeContextStateMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate firmware GPU context suspend state (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_contextsuspendalloc;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+ }
+#endif
+
+ if (ui32FrameworkCommandSize)
+ {
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psComputeContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate firmware GPU framework state (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+ psComputeContext->psFWFrameworkMemDesc,
+ pbyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to populate the framework buffer (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+ }
+
+ ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
+ ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_CDM,
+ RGXFWIF_DM_CDM,
+ hMemCtxPrivData,
+ psComputeContext->psFWComputeContextMemDesc,
+ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext),
+ psFWMemContextMemDesc,
+ psComputeContext->psFWComputeContextStateMemDesc,
+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2,
+ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2,
+ ui32ContextFlags,
+ i32Priority,
+ ui32MaxDeadlineMS,
+ ui64RobustnessAddress,
+ &sInfo,
+ &psComputeContext->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+ (void **)&psFWComputeContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_acquire_cpu_mapping;
+ }
+
+ OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize);
+ DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
+ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psComputeContext->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-cdm");
+ if (IS_ERR(psComputeContext->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(psComputeContext->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+ SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+ }
+
+ *ppsComputeContext = psComputeContext;
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+fail_buffer_sync_context_create:
+#endif
+fail_acquire_cpu_mapping:
+ FWCommonContextFree(psComputeContext->psServerCommonContext);
+fail_contextalloc:
+fail_frameworkcopy:
+ if (psComputeContext->psFWFrameworkMemDesc)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+ }
+fail_frameworkcreate:
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+ OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+fail_fwcomputecontext:
+ OSFreeMem(psComputeContext);
+ return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyComputeContextKM
+ *
+ * Destroys a server-side compute context. The firmware is first asked to
+ * clean up the common context; while the FW is still using it this function
+ * returns PVRSRV_ERROR_RETRY and the caller is expected to retry later.
+ * Resources are only freed once FW cleanup (and, where workload estimation
+ * is enabled, the WorkEst CCB drain) has completed.
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+ psComputeContext->psServerCommonContext,
+ RGXFWIF_DM_CDM,
+ PDUMP_FLAGS_NONE);
+
+ /* RETRY is not a failure: the FW still owns the context, so the destroy
+ * must be re-attempted by the caller. Nothing has been freed yet. */
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ return eError;
+ }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ /* remove after RGXFWRequestCommonContextCleanUp() because we might return
+ * RETRY and don't want to be calling this twice */
+ if (psComputeContext->psBufferSyncContext != NULL)
+ {
+ pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext);
+ psComputeContext->psBufferSyncContext = NULL;
+ }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext;
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+ /* Map the FW compute context to read back how many workload-estimation
+ * CCB commands the FW has marked as submitted. */
+ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
+ (void **)&psFWComputeContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map firmware compute context (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ return eError;
+ }
+
+ ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted;
+
+ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
+
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+ __func__, ui32WorkEstCCBSubmitted,
+ psComputeContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+ /* WorkEst commands still in flight: defer the destroy. */
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+#endif
+
+ /* ... it has so we can free its resources */
+
+ /* Unlink from the device-wide compute context list under the write lock. */
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_remove_node(&(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
+ }
+#endif
+
+ SyncAddrListDeinit(&psComputeContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psComputeContext->sSyncAddrListUpdate);
+
+ /* Free remaining resources; the order mirrors the creation-failure
+ * unwind path (common context first, FW allocations, then the lock
+ * and the host-side context structure itself). */
+ FWCommonContextFree(psComputeContext->psServerCommonContext);
+ if (psComputeContext->psFWFrameworkMemDesc)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+ }
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
+
+ OSLockDestroy(psComputeContext->hLock);
+ OSFreeMem(psComputeContext);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXKickCDMKM
+ *
+ * Builds and submits a compute (CDM) command for this context:
+ *  - populates the context's fence/update sync address lists from the
+ *    client-supplied sync prims;
+ *  - resolves buffer-sync PMRs (if any) and the input check fence into
+ *    sync checkpoints, appending them to the lists;
+ *  - optionally creates an output fence on iUpdateTimeline (plus its
+ *    timeline sync prim update);
+ *  - acquires space in the client CCB, writes the command, then kicks the
+ *    firmware through the kernel CCB (retrying while the KCCB is full);
+ *  - on failure, rolls back checkpoints and fence data in reverse order.
+ *
+ * Fix: the update fence name terminator previously used a hard-coded
+ * index 31; it now uses PVRSRV_SYNC_NAME_LENGTH - 1 so the write stays
+ * inside the declared buffer if the name length macro ever changes.
+ */
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32NumWorkgroups,
+ IMG_UINT32 ui32NumWorkitems,
+ IMG_UINT64 ui64DeadlineInus)
+{
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CDMCmdOffset = 0;
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext);
+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext);
+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+ IMG_UINT32 ui32FWCtx;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0};
+ IMG_UINT32 ui32CDMWorkloadDataRO = 0;
+ IMG_UINT32 ui32CDMCmdHeaderOffset = 0;
+ IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0;
+ RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+ IMG_UINT64 ui64FBSCEntryMask = 0;
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0);
+
+ /* An update timeline without somewhere to return the fence is invalid. */
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Ensure we haven't been given a null ptr to
+ * update values if we have been told we
+ * have updates
+ */
+ if (ui32ClientUpdateCount > 0)
+ {
+ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+ "paui32ClientUpdateValue NULL but "
+ "ui32ClientUpdateCount > 0",
+ PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Ensure the string is null-terminated (Required for safety).
+ * Use the declared buffer size rather than a hard-coded index so this
+ * stays in bounds if PVRSRV_SYNC_NAME_LENGTH ever changes. */
+ pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH - 1] = '\0';
+
+ OSLockAcquire(psComputeContext->hLock);
+
+ /* Reset the fence list; check syncs are appended later from the
+ * resolved input fence and any buffer-sync checkpoints. */
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+ 0,
+ NULL,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFODevVarBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientUpdateCount)
+ {
+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+
+ if (ui32SyncPMRCount != 0)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ int err;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling "
+ "pvr_buffer_sync_resolve_and_create_fences", __func__));
+
+ err = pvr_buffer_sync_resolve_and_create_fences(
+ psComputeContext->psBufferSyncContext,
+ psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ &ui32BufferFenceSyncCheckpointCount,
+ &apsBufferFenceSyncCheckpoints,
+ &psBufferUpdateSyncCheckpoint,
+ &psBufferSyncData
+ );
+
+ if (unlikely(err))
+ {
+ /* Map the Linux errno back onto a services error code. */
+ switch (err)
+ {
+ case -EINTR:
+ eError = PVRSRV_ERROR_RETRY;
+ break;
+ case -ENOMEM:
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ break;
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ break;
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: "
+ "pvr_buffer_sync_resolve_and_create_fences failed (%s)",
+ __func__, PVRSRVGetErrorString(eError)));
+ }
+
+ goto fail_resolve_input_fence;
+ }
+
+ /* Append buffer sync fences */
+ if (ui32BufferFenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints "
+ "to CDM Fence (&psTransferContext->sSyncAddrListFence=<%p>, "
+ "pauiIntFenceUFOAddress=<%p>)...", __func__,
+ ui32BufferFenceSyncCheckpointCount,
+ (void *) &psComputeContext->sSyncAddrListFence ,
+ (void *) pauiIntFenceUFOAddress));
+
+ SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence,
+ ui32BufferFenceSyncCheckpointCount,
+ apsBufferFenceSyncCheckpoints);
+ if (pauiIntFenceUFOAddress == NULL)
+ {
+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+ }
+
+ /* Append the update (from output fence) */
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+ 1, &psBufferUpdateSyncCheckpoint);
+ if (pauiIntUpdateUFOAddress == NULL)
+ {
+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+ }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers",
+ __func__, ui32SyncPMRCount));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError)));
+ goto fail_free_buffer_sync_data;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+ pszUpdateFenceName,
+ iUpdateTimeline,
+ psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%s)", __func__, PVRSRVGetErrorString(eError)));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ /* 0xbb fill — presumably a poison value to make any unset update
+ * value stand out in debugging; confirm against driver convention. */
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ if (paui32IntUpdateValue)
+ {
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ }
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+ /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync));
+ /* Now append the timeline sync prim addr to the compute context update list */
+ SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED)
+ /*
+ * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
+ * in other words, take the value and set it to zero afterwards.
+ * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
+ * as it must be ready at the time of context activation.
+ */
+ {
+ eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode,
+ FWCommonContextGetServerMMUCtx(psComputeContext->psServerCommonContext),
+ &ui64FBSCEntryMask);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%s)", PVRSRVGetErrorString(eError)));
+ goto fail_cmdinvalfbsc;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups;
+ sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems;
+
+ /* Prepare workload estimation */
+ WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice,
+ &psComputeContext->sWorkEstData,
+ &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
+ RGXFWIF_CCB_CMD_TYPE_CDM,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickDataCompute);
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr);
+
+ /* Describe the CDM command (fences, updates, payload, timestamps) to
+ * the CCB helper before acquiring space in the client CCB. */
+ RGXCmdHelperInitCmdCCB(psDevInfo,
+ psClientCCB,
+ ui64FBSCEntryMask,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ NULL,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32CmdSize,
+ pui8DMCmd,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_CDM,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickDataCompute,
+#else
+ NULL,
+#endif
+ "Compute",
+ bCCBStateOpen,
+ asCmdHelperData);
+
+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so if there isn't space in the kernel CCB we can return with
+ retry back to services client before we take any operations
+ */
+
+ /*
+ We might only be kicking for flush out a padding packet so only submit
+ the command if the create was successful
+ */
+
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+
+ ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* The following is used to determine the offset of the command header containing
+ the workload estimation data so that can be accessed when the KCCB is read */
+ ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
+
+ ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and
+ * therefore would start at an offset of 0 rather than the current command
+ * offset */
+ if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck)
+ {
+ ui32CDMWorkloadDataRO = ui32CDMCmdOffset;
+ }
+ else
+ {
+ ui32CDMWorkloadDataRO = 0;
+ }
+ }
+#endif
+
+ /* Construct the kernel compute CCB command. */
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced
+ * when the KCCB command reaches the FW */
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset;
+ }
+#endif
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+ if (psComputeCmdCmn)
+ {
+ HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32CDMCmdOffset,
+ psComputeCmdCmn->ui32FrameNum,
+ ui32ExtJobRef,
+ ui32IntJobRef);
+ }
+
+ RGXSRV_HWPERF_ENQ(psComputeContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE2_CDM,
+ iCheckFence,
+ iUpdateFence,
+ iUpdateTimeline,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ /*
+ * Submit the compute command to the firmware.
+ */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sCmpKCCBCmd,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s failed to schedule kernel CCB command (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_schedulecmd;
+ }
+ else
+ {
+ PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode,
+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE2_CDM);
+ }
+
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ *piUpdateFence = iUpdateFence;
+
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence,
+ pvUpdateFenceFinaliseData,
+ psUpdateSyncCheckpoint, pszUpdateFenceName);
+ }
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+
+ OSLockRelease(psComputeContext->hLock);
+
+ return PVRSRV_OK;
+
+fail_schedulecmd:
+fail_cmdaquire:
+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED)
+fail_cmdinvalfbsc:
+#endif
+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+ if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+
+fail_free_buffer_sync_data:
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_failed(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+
+fail_resolve_input_fence:
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+err_populate_sync_addr_list:
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+ OSLockRelease(psComputeContext->hLock);
+ return eError;
+}
+
+/*
+ * PVRSRVRGXFlushComputeDataKM
+ *
+ * Issues an SLC flush (no invalidate) scoped to this compute context via
+ * the kernel CCB and waits for the FW to acknowledge the command slot.
+ * Returns PVRSRV_ERROR_RETRY if the KCCB stayed full for the whole retry
+ * window, so the caller can try again.
+ */
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ RGXFWIF_KCCB_CMD sFlushCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32kCCBCommandSlot;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode,
+ PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+ /* Flush only (bInval=FALSE), restricted to this DM context. */
+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+ OSLockAcquire(psComputeContext->hLock);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
+ RGXFWIF_DM_CDM,
+ &sFlushCmd,
+ PDUMP_FLAGS_CONTINUOUS,
+ &ui32kCCBCommandSlot);
+ /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */
+ if ((eError != PVRSRV_ERROR_RETRY) &&
+ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */
+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Returning RETRY to caller", __func__));
+ eError = PVRSRV_ERROR_RETRY;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to schedule SLC flush command (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ }
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Compute flush aborted (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ }
+ else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
+ RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
+ {
+ /* Slot acknowledged, but the FW recorded a HW poll failure:
+ * warn rather than fail, matching the FW return-slot protocol. */
+ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
+ }
+ }
+
+ OSLockRelease(psComputeContext->hLock);
+ return eError;
+}
+
+
+/* PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ *
+ * Notify the firmware that the client CCB write offset of this compute
+ * context has been updated. Only supported when the device reports
+ * CDM_CONTROL_STREAM_FORMAT == 2; otherwise PVRSRV_ERROR_NOT_SUPPORTED
+ * is returned without touching the firmware.
+ */
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+ 2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT))
+ {
+
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ OSLockAcquire(psComputeContext->hLock);
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sKCCBCmd,
+ PDUMP_FLAGS_NONE);
+ /* Retry only while the KCCB is transiently busy */
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to schedule the FW command %d (%s)",
+ __func__,
+ eError,
+ PVRSRVGetErrorString(eError)));
+ }
+
+ OSLockRelease(psComputeContext->hLock);
+ return eError;
+ }
+ else
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+}
+
+
+/* PVRSRVRGXSetComputeContextPriorityKM
+ *
+ * Set the scheduling priority of a compute context on the CDM data master.
+ * The work is delegated to ContextSetPriority() under the context lock;
+ * psDeviceNode is unused (the device is reached via psComputeContext).
+ */
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_INT32 i32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ OSLockAcquire(psComputeContext->hLock);
+
+ eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+ psConnection,
+ psComputeContext->psDeviceNode->pvDevice,
+ i32Priority,
+ RGXFWIF_DM_CDM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
+ }
+
+ OSLockRelease(psComputeContext->hLock);
+ return eError;
+}
+
+/*
+ * PVRSRVRGXSetComputeContextPropertyKM
+ *
+ * Set a property on a compute context. Currently only
+ * RGX_CONTEXT_PROPERTY_FLAGS is supported (ui64Input is truncated to 32 bits
+ * and applied as the context flags); any other property returns
+ * PVRSRV_ERROR_NOT_SUPPORTED. pui64Output is not written by any current path.
+ */
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ RGX_CONTEXT_PROPERTY eContextProperty,
+ IMG_UINT64 ui64Input,
+ IMG_UINT64 *pui64Output)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch (eContextProperty)
+ {
+ case RGX_CONTEXT_PROPERTY_FLAGS:
+ {
+ /* Only the low 32 bits of the input carry flag data */
+ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+ OSLockAcquire(psComputeContext->hLock);
+ eError = FWCommonContextSetFlags(psComputeContext->psServerCommonContext,
+ ui32ContextFlags);
+ OSLockRelease(psComputeContext->hLock);
+ break;
+ }
+
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+ }
+
+ return eError;
+}
+
+/* DumpComputeCtxtsInfo
+ *
+ * Debug helper: walk the device's list of compute contexts (under the
+ * read side of hComputeCtxListLock) and dump the common-context debug
+ * info for each one via the supplied printf callback.
+ */
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_UINT32 ui32VerbLevel)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+ DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+/* CheckForStalledClientComputeCtxt
+ *
+ * Watchdog helper: scan every compute context on the device and report
+ * whether any client CCB is stalled. Returns a bitmask that is either 0
+ * or RGX_KICK_TYPE_DM_CDM (set if at least one context reports
+ * PVRSRV_ERROR_CCCB_STALLED).
+ */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32ContextBitMask = 0;
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+ if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+ == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+ }
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/*
+ * PVRSRVRGXGetLastDeviceErrorKM
+ *
+ * Return the last device error recorded in psDevInfo and clear it
+ * (read-and-reset semantics: a second call without an intervening error
+ * yields RGX_CONTEXT_RESET_REASON_NONE). Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *ui32Error)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ *ui32Error = psDevInfo->eLastDeviceError;
+ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE;
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXKickTimestampQueryKM
+ *
+ * Submit a RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP command on the compute
+ * context's client CCB and kick the firmware. If iCheckFence is a real
+ * fence its sync checkpoints are resolved and appended as checks so the
+ * timestamp command waits for them. On success the checkpoint references
+ * are dropped and the resolved list freed before returning; on failure
+ * the error paths unwind in reverse order of acquisition.
+ */
+PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ PVRSRV_FENCE iCheckFence,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32ExtJobRef)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext);
+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext);
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+ IMG_UINT32 ui32PDumpFlags = 0;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ OSLockAcquire(psComputeContext->hLock);
+
+ if (iCheckFence != PVRSRV_NO_FENCE)
+ {
+
+ /* Reset the fence sync-address list before appending checkpoints */
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+ 0,
+ NULL,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError)));
+ goto fail_resolve_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+ }
+ }
+#endif
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+ eError = SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (%s)", __func__, PVRSRVGetErrorString(eError)));
+ goto fail_append_checkpoints;
+ }
+ }
+ }
+
+ /* Build the client CCB command: checks only, no updates, no workload data */
+ RGXCmdHelperInitCmdCCB(psDevInfo,
+ psClientCCB,
+ 0, /* empty ui64FBSCEntryMask */
+ ui32FenceSyncCheckpointCount,
+ psComputeContext->sSyncAddrListFence.pasFWAddrs,
+ NULL,
+ 0,
+ NULL,
+ NULL,
+ ui32CmdSize,
+ pui8DMCmd,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ PDUMP_FLAGS_NONE,
+ NULL,
+ "VkTimestamp",
+ IMG_FALSE, /* bCCBStateOpen */
+ asCmdHelperData);
+
+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXCmdHelperAcquireCmdCCB", fail_cmdaquire);
+
+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM",
+ FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+
+ /* Construct the kernel compute CCB command. */
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ /*
+ * Submit the RGXFWIF_CCB_CMD_TYPE_VK_TIMESTAMP
+ * command to the firmware.
+ */
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sCmpKCCBCmd,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", fail_cmdaquire);
+
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned
+ * by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+
+ OSLockRelease(psComputeContext->hLock);
+ return PVRSRV_OK;
+
+fail_cmdaquire:
+ /* Undo the checkpoint appends made to the context's fence list */
+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode,
+ &psComputeContext->sSyncAddrListFence);
+
+fail_append_checkpoints:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free memory allocated to hold the resolved fence's checkpoints */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+fail_resolve_fence:
+err_populate_sync_addr_list:
+ OSLockRelease(psComputeContext->hLock);
+ return eError;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title RGX compute functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX compute functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(RGXCOMPUTE_H)
+#define RGXCOMPUTE_H
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*************************************************************************/ /*!
+@Function PVRSRVRGXCreateComputeContextKM
+@Description Creates an RGX device context for submitting commands to CDM.
+@Input psConnection Device connection
+@Input psDeviceNode Services-managed device
+@Input i32Priority Scheduling priority for commands
+ on this context
+@Input ui32FrameworkCommandSize
+ Framework command size
+@Input pabyFrameworkCommand Pointer to framework command
+@Input hMemCtxPrivData Private data
+@Input ui32StaticComputeContextStateSize
+ Size of fixed compute context state
+@Input pStaticComputeContextState
+ Compute context state
+@Input ui32PackedCCBSizeU88 Packed CCB size. The first byte contains
+ the log2 CCB size and the second byte
+ the log2 maximum CCB size.
+@Input ui32ContextFlags Flags with context properties
+@Input ui64RobustnessAddress Address for FW to signal a context reset
+@Input ui32MaxDeadlineMS Max deadline limit in MS that the
+ workload can run
+@Output ppsComputeContext Pointer to the created compute context
+@Return PVRSRV_ERROR Returns PVRSRV_OK or an error.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_INT32 i32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_UINT32 ui32StaticComputeContextStateSize,
+ IMG_PBYTE pStaticComputeContextState,
+ IMG_UINT32 ui32PackedCCBSizeU88,
+ IMG_UINT32 ui32ContextFlags,
+ IMG_UINT64 ui64RobustnessAddress,
+ IMG_UINT32 ui32MaxDeadlineMS,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyComputeContext
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXKickCDMKM
+
+ @Description
+ Server-side implementation of RGXKickCDM
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32NumWorkgroups,
+ IMG_UINT32 ui32NumWorkitems,
+ IMG_UINT64 ui64DeadlineInus);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXFlushComputeDataKM
+
+ @Description
+ Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_INT32 i32Priority);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ RGX_CONTEXT_PROPERTY eContextProperty,
+ IMG_UINT64 ui64Input,
+ IMG_UINT64 *pui64Output);
+
+PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *ui32Error);
+
+PVRSRV_ERROR PVRSRVRGXKickTimestampQueryKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ PVRSRV_FENCE iCheckFence,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32ExtJobRef);
+
+/* Debug - Dump debug info of compute contexts on this device */
+void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_UINT32 ui32VerbLevel);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* RGXCOMPUTE_H */
#include "mmu_common.h"
#include "devicemem_server.h"
#include "osfunc.h"
+#include "vmm_pvz_server.h"
+#include "vz_vm.h"
+#if defined(PDUMP)
+#include "devicemem_pdump.h"
+#endif
PVRSRV_ERROR
PVRSRVRGXFWDebugQueryFWLogKM(
}
PVRSRV_ERROR
-PVRSRVRGXFWDebugSetOSidPriorityKM(
+PVRSRVRGXFWDebugMapGuestHeapKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT64 ui64GuestHeapBase)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32DeviceID = psDeviceNode->sDevId.ui32InternalID;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (PVRSRV_VZ_MODE_IS(HOST))
+ {
+ if (ui64GuestHeapBase == IMG_UINT64_MAX)
+ {
+ /* Unmap the heap, then set the DriverID to offline. Only attempt
+ * the offline transition if the unmap succeeded, so the first
+ * failure is propagated rather than overwritten. */
+ eError = PvzServerUnmapDevPhysHeap(ui32DriverID, ui32DeviceID);
+ if (eError == PVRSRV_OK)
+ {
+ eError = PvzServerOnVmOffline(ui32DriverID, ui32DeviceID);
+ }
+ }
+ else
+ {
+ /* Set the DriverID online if necessary, then map the firmware heap.
+ * Skip the mapping if the online transition failed, to avoid
+ * mapping a heap for a VM left in the offline state. */
+ eError = PVRSRV_OK;
+ if (!IsVmOnline(ui32DriverID, ui32DeviceID))
+ {
+ eError = PvzServerOnVmOnline(ui32DriverID, ui32DeviceID);
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+ eError = PvzServerMapDevPhysHeap(ui32DriverID, ui32DeviceID, RGX_FIRMWARE_RAW_HEAP_SIZE, ui64GuestHeapBase);
+ }
+ }
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ PVR_DPF((PVR_DBG_ERROR, " %s: Driver must run in Host mode to support Guest Mapping operations\n", __func__));
+ }
+
+ return eError;
+}
+
+/* PVRSRVRGXFWDebugSetDriverPriorityKM
+ *
+ * Update the scheduling priority of a VZ driver in the firmware runtime
+ * config and notify the firmware via a VZ_DRV_ARRAY_CHANGE KCCB command.
+ * Not supported in GUEST mode. Returns PVRSRV_ERROR_NOT_INITIALISED if
+ * the runtime config is not yet mapped, PVRSRV_ERROR_INVALID_PARAMS for
+ * an out-of-range DriverID.
+ */
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetDriverPriorityKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DriverPriority)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sVzPriorityCmd = { 0 };
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+ if (psDevInfo->psRGXFWIfRuntimeCfg == NULL)
+ {
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Write the new priority into shared memory before kicking the FW, with
+ * a barrier so the FW observes the updated value when handling the cmd */
+ sVzPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE;
+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID] = ui32DriverPriority;
+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID]);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "Updating the priority of DriverID %u inside RGXFWIfRuntimeCfg", ui32DriverID);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverPriority) + (ui32DriverID * sizeof(ui32DriverPriority)),
+ ui32DriverPriority,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sVzPriorityCmd,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+/* PVRSRVRGXFWDebugSetDriverIsolationGroupKM
+ *
+ * Update the isolation group of a VZ driver in the firmware runtime config
+ * and notify the firmware via a VZ_DRV_ARRAY_CHANGE KCCB command. Mirrors
+ * PVRSRVRGXFWDebugSetDriverPriorityKM; not supported in GUEST mode.
+ */
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetDriverIsolationGroupKM(
CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32OSidPriority)
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DriverIsolationGroup)
{
+ PVRSRV_ERROR eError;
PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sVzIsolationGroupCmd = { 0 };
+
PVR_UNREFERENCED_PARAMETER(psConnection);
- return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+ if (psDevInfo->psRGXFWIfRuntimeCfg == NULL)
+ {
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ if (ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Publish the new isolation group to the FW-visible runtime config
+ * (with a barrier) before scheduling the change notification */
+ sVzIsolationGroupCmd.eCmdType = RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE;
+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = ui32DriverIsolationGroup;
+ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID]);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "Updating the isolation group of DriverID%u inside RGXFWIfRuntimeCfg", ui32DriverID);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ offsetof(RGXFWIF_RUNTIME_CFG, aui32DriverIsolationGroup) + (ui32DriverID * sizeof(ui32DriverIsolationGroup)),
+ ui32DriverIsolationGroup,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sVzIsolationGroupCmd,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
}
PVRSRV_ERROR
PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32OSNewState)
{
PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
PVR_UNREFERENCED_PARAMETER(psConnection);
eState = (ui32OSNewState) ? (RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE);
- return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState);
+ return RGXFWSetFwOsState(psDevInfo, ui32DriverID, eState);
}
PVRSRV_ERROR
return PVRSRV_OK;
}
+
+/* PVRSRVRGXFWDebugInjectFaultKM
+ *
+ * Debug bridge entry point: ask the firmware to inject a fault on this
+ * device (delegates to RGXFWInjectFault). psConnection is unused.
+ */
+PVRSRV_ERROR
+PVRSRVRGXFWDebugInjectFaultKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return RGXFWInjectFault(psDevInfo);
+}
IMG_UINT32 ui32HCSDeadlineMS);
PVRSRV_ERROR
-PVRSRVRGXFWDebugSetOSidPriorityKM(
+PVRSRVRGXFWDebugSetDriverPriorityKM(
CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32OSidPriority);
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DriverPriority);
+
+PVRSRV_ERROR
+PVRSRVRGXFWDebugSetDriverIsolationGroupKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DriverIsolationGroup);
PVRSRV_ERROR
PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32OSNewState);
+PVRSRV_ERROR
+PVRSRVRGXFWDebugMapGuestHeapKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DriverID,
+ IMG_UINT64 ui64GuestHeapBase);
+
PVRSRV_ERROR
PVRSRVRGXFWDebugPHRConfigureKM(
CONNECTION_DATA *psConnection,
CONNECTION_DATA * psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR
+PVRSRVRGXFWDebugInjectFaultKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
#endif
* which should be extended when necessary. */
#include "rgxfwimageutils.h"
#include "pvrsrv.h"
+#include "pvrversion.h"
/************************************************************************
return PVRSRV_OK;
}
+ /* Add offset to pointer to FW allocation now that allocation is found */
+
/* Direct Mem write to mapped memory */
ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr;
ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset;
- /* Add offset to pointer to FW allocation only if
- * that allocation is available
- */
- if (*uiHostAddrOut)
- {
- *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
- }
+ *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
return PVRSRV_OK;
}
return psEntry->ui32BaseAddr;
}
+/* RGXValidateFWHeaderVersion1
+ *
+ * Validate the version-1 fields of the FW info header (these checks apply
+ * to any FW_INFO_VERSION). Most mismatches are logged but deliberately not
+ * treated as fatal; only a MIPS FW page-size mismatch returns an error.
+ * NOTE(review): layout-entry size/count mismatches fall through to
+ * PVRSRV_OK after logging - confirm this leniency is intended.
+ */
+static inline
+PVRSRV_ERROR RGXValidateFWHeaderVersion1(const void *hPrivate,
+ const RGX_FW_INFO_HEADER *psInfoHeader)
+{
+ /* Applicable to any FW_INFO_VERSION */
+ if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY))
+ {
+ RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)",
+ __func__,
+ (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY),
+ psInfoHeader->ui32LayoutEntrySize);
+ }
+
+ /* Applicable to any FW_INFO_VERSION */
+ if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES)
+ {
+ RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)",
+ __func__,
+ MAX_NUM_ENTRIES,
+ psInfoHeader->ui32LayoutEntryNum);
+ }
+
+#if defined(RGX_FEATURE_MIPS_BIT_MASK)
+ /* Applicable to any FW_INFO_VERSION */
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ {
+ /* The only fatal check: a MIPS FW built for a different page size
+ * cannot be mapped correctly */
+ if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate))
+ {
+ RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)",
+ __func__,
+ (IMG_UINT32) RGXGetOSPageSize(hPrivate),
+ psInfoHeader->ui32FwPageSize);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+#endif
+
+ if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION)
+ {
+ /* Not an error because RGX_FW_INFO_HEADER is now versioned. It can grow
+ * incrementally and it must be backwards compatible.
+ */
+ RGXCommentLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)",
+ __func__,
+ (IMG_UINT32) FW_INFO_VERSION,
+ psInfoHeader->ui32InfoVersion);
+ /* Skip the header-length check: it is only meaningful when the FW
+ * was built against the same header version as the KM */
+ goto exit_version1_validation;
+ }
+
+ if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER))
+ {
+ RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)",
+ __func__,
+ (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER),
+ psInfoHeader->ui32HeaderLen);
+ }
+
+exit_version1_validation:
+ return PVRSRV_OK;
+}
+
+/* RGXValidateFWHeaderVersion2
+ *
+ * Validate the fields introduced in FW_INFO_VERSION 2: the PVR major/minor
+ * version the firmware was built against must match the KM's. A mismatch
+ * is fatal (PVRSRV_ERROR_INVALID_PARAMS).
+ */
+static inline
+PVRSRV_ERROR RGXValidateFWHeaderVersion2(const void *hPrivate,
+ const RGX_FW_INFO_HEADER *psInfoHeader)
+{
+ if (psInfoHeader->ui16PVRVersionMajor != PVRVERSION_MAJ ||
+ psInfoHeader->ui16PVRVersionMinor != PVRVERSION_MIN)
+ {
+ RGXErrorLog(hPrivate, "%s: KM and FW version mismatch (expected: %u.%u, found: %u.%u)",
+ __func__,
+ PVRVERSION_MAJ,
+ PVRVERSION_MIN,
+ psInfoHeader->ui16PVRVersionMajor,
+ psInfoHeader->ui16PVRVersionMinor);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* RGXValidateFWHeaderVersion
+ *
+ * Dispatch FW info header validation by ui32InfoVersion. The switch is a
+ * deliberate reverse cascade: newer (and unknown-future) versions validate
+ * their own fields first, then fall through to the checks of every older
+ * version, so version N runs all checks for versions <= N. Version 0 is
+ * rejected outright.
+ */
+static inline
+PVRSRV_ERROR RGXValidateFWHeaderVersion(const void *hPrivate,
+ const RGX_FW_INFO_HEADER *psInfoHeader)
+{
+ PVRSRV_ERROR eError;
+
+ switch (psInfoHeader->ui32InfoVersion)
+ {
+ default:
+ /* Unknown future versions are assumed backwards compatible:
+ * run the newest known validation set */
+ __fallthrough;
+ case 2:
+ eError = RGXValidateFWHeaderVersion2(hPrivate, psInfoHeader);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ __fallthrough;
+ case 1:
+ eError = RGXValidateFWHeaderVersion1(hPrivate, psInfoHeader);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ break;
+ case 0:
+ RGXErrorLog(hPrivate, "%s: invalid FW_INFO_VERSION", __func__);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
const IMG_BYTE *pbRGXFirmware,
const IMG_UINT32 ui32RGXFirmwareSize,
RGX_FW_INFO_HEADER *psInfoHeader;
const IMG_BYTE *pbRGXFirmwareInfo;
const IMG_BYTE *pbRGXFirmwareLayout;
+ PVRSRV_ERROR eError;
IMG_UINT32 i;
if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE)
return PVRSRV_ERROR_INVALID_PARAMS;
}
-
/*
* Acquire pointer to the FW info header within the FW image.
* The format of the header in the FW image might not be the one expected
pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE;
psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo;
- /* If any of the following checks fails, the FW will likely not work properly */
-
- if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION)
- {
- RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)",
- __func__,
- (IMG_UINT32) FW_INFO_VERSION,
- psInfoHeader->ui32InfoVersion);
- }
-
- if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER))
- {
- RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)",
- __func__,
- (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER),
- psInfoHeader->ui32HeaderLen);
- }
-
- if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY))
- {
- RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)",
- __func__,
- (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY),
- psInfoHeader->ui32LayoutEntrySize);
- }
-
- if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES)
- {
- RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)",
- __func__,
- MAX_NUM_ENTRIES,
- psInfoHeader->ui32LayoutEntryNum);
- }
-
-#if defined(RGX_FEATURE_MIPS_BIT_MASK)
- if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ eError = RGXValidateFWHeaderVersion(hPrivate, psInfoHeader);
+ if (eError != PVRSRV_OK)
{
- if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate))
- {
- RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)",
- __func__,
- (IMG_UINT32) RGXGetOSPageSize(hPrivate),
- psInfoHeader->ui32FwPageSize);
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+ return eError;
}
-#endif
ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum;
/* Currently supported by default */
-#if defined(SUPPORT_TL_PRODUCER_CALLBACK)
+#if !defined(NO_HARDWARE) && defined(SUPPORT_TL_PRODUCER_CALLBACK)
static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
{
/* avoid uninitialised data */
psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL;
psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+ psRgxDevInfo->bHWPerfHasRun = IMG_FALSE;
PVR_DPF_RETURN_OK;
}
@Description This function allocates the HWperf firmware buffer (L1 buffer)
and host driver TL buffer (L2 buffer) if HWPerf is enabled at
driver load time. Otherwise, these buffers are allocated
- on-demand as and when required. Caller
- must possess hHWPerfLock lock before calling this
- function so the state tested is not inconsistent if called
- outside of driver initialisation.
+ on-demand as and when required. Caller must possess hHWPerfLock
+ lock before calling this function so the state tested is not
+ inconsistent if called outside of driver initialisation.
@Input psRgxDevInfo RGX Device Info, on which init is done
PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
{
IMG_HANDLE hStream = NULL; /* Init required for noHW */
- PVRSRV_ERROR eError;
- IMG_UINT32 ui32L2BufferSize = 0;
PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags;
- IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold
- names up to "hwperf_9999", which is enough */
+ PVRSRV_ERROR eError;
PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
/* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence,
* L2 buffer is not allocated */
#if !defined(NO_HARDWARE)
- /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer
- * accessed by the FW. The MISR may try to write one packet the size of the L1
- * buffer in some scenarios. When logging is enabled in the MISR, it can be seen
- * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers
- * are the more chance of this happening.
- * Size chosen to allow MISR to write an L1 sized packet and for the client
- * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1.
- */
- ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
- (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
-
- /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
- if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
- PVRSRV_TL_HWPERF_RGX_FW_STREAM,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to form HWPerf stream name for device %d",
- __func__,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
+ TL_STREAM_INFO sTLStreamInfo;
+ IMG_UINT32 ui32L2BufferSize;
+ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+ /* + 5 is used to allow names up to "hwperf_9999", which is enough */
+
+ /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer
+ * accessed by the FW. The MISR may try to write one packet the size of the L1
+ * buffer in some scenarios. When logging is enabled in the MISR, it can be seen
+ * if the L2 buffer hits a full condition. The closer in size the L2 and L1 buffers
+ * are the more chance of this happening.
+ * Size chosen to allow MISR to write an L1 sized packet and for the client
+ * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1.
+ */
+ ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+ (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
- eError = TLStreamCreate(&hStream,
- pszHWPerfStreamName,
- ui32L2BufferSize,
- TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
- _HWPerfFWOnReaderOpenCB, psRgxDevInfo,
+ /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
+ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to form HWPerf stream name for device %d",
+ __func__,
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = TLStreamCreate(&hStream,
+ pszHWPerfStreamName,
+ ui32L2BufferSize,
+ TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+ _HWPerfFWOnReaderOpenCB, psRgxDevInfo,
#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
- NULL, NULL
+ NULL, NULL
#else
- /* Not enabled by default */
- RGXHWPerfTLCB, psRgxDevInfo
+ /* Not enabled by default */
+ RGXHWPerfTLCB, psRgxDevInfo
#endif
- );
- PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1);
+ );
+ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1);
- eError = TLStreamSetNotifStream(hStream,
- PVRSRVGetPVRSRVData()->hTLCtrlStream);
- /* we can still discover host stream so leave it as is and just log error */
- PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+ eError = TLStreamSetNotifStream(hStream,
+ PVRSRVGetPVRSRVData()->hTLCtrlStream);
+ /* we can still discover host stream so leave it as is and just log error */
+ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
- /* send the event here because host stream is implicitly opened for write
- * in TLStreamCreate and TLStreamOpen is never called (so the event is
- * never emitted) */
- TLStreamMarkStreamOpen(hStream);
-
- {
- TL_STREAM_INFO sTLStreamInfo;
+ /* send the event here because host stream is implicitly opened for write
+ * in TLStreamCreate and TLStreamOpen is never called (so the event is
+ * never emitted) */
+ TLStreamMarkStreamOpen(hStream);
TLStreamInfo(hStream, &sTLStreamInfo);
psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize;
psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d",
- psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
-#else /* defined(NO_HARDWARE) */
- PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
- PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
- PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
- ui32L2BufferSize = 0;
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d",
+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+ }
#endif
psRgxDevInfo->hHWPerfStream = hStream;
IMG_BOOL bToggle,
IMG_UINT64 ui64Mask)
{
- PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError;
PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
RGXFWIF_KCCB_CMD sKccbCmd;
IMG_UINT32 ui32kCCBCommandSlot;
if (!psDevice->bFirmwareInitialised)
{
- psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter
- eError = PVRSRV_ERROR_NOT_INITIALISED;
-
- PVR_DPF((PVR_DBG_ERROR,
- "HWPerf has NOT been initialised yet. Mask has been SET to "
- "(%" IMG_UINT64_FMTSPECx ")",
- ui64Mask));
-
- goto unlock_and_return;
+ /* No other initialisation can be done at this point until the FW is
+ * initialised, so unlock, log and return OK so that the caller knows
+ * the filter has been set. */
+ psDevice->ui64HWPerfFilter = ui64Mask;
+ /* If the firmware has not been initialised, treat this as if HWPerf
+ * has already run. If the next call to enable HWPerf comes from the
+ * FTrace module, this makes it skip the first check for ordinal
+ * inconsistency. Not ideal, but acceptable since we know the ordinals
+ * are correct. */
+ psDevice->bHWPerfHasRun = IMG_TRUE;
+ OSLockRelease(psDevice->hHWPerfLock);
+ goto done_;
}
if (RGXHWPerfIsInitRequired(psDevice))
}
}
-#if defined(RGX_FEATURE_HWPERF_VOLCANIC) && defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
- if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice))
- {
- /* Allocate power monitoring log buffer if enabled */
- eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring "
- "resources failed", __func__));
- goto unlock_and_return;
- }
- }
-#endif
-
/* Unlock here as no further HWPerf resources are used below that would be
* affected if freed by another thread */
OSLockRelease(psDevice->hHWPerfLock);
/* Return if the filter is the same */
if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
- goto return_;
+ {
+ goto done_;
+ }
/* Prepare command parameters ... */
sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
RGXFWIF_DM_GP,
&sKccbCmd,
- IMG_TRUE,
+ PDUMP_FLAGS_CONTINUOUS,
&ui32kCCBCommandSlot);
if (eError != PVRSRV_OK)
{
goto return_;
}
+ if (psDevice->ui64HWPerfFilter == 0U)
+ {
+ /* Set to notify FTrace module that HWPerf has been enabled at least
+ * once since the last time FTrace module was used. */
+ psDevice->bHWPerfHasRun = IMG_TRUE;
+ }
+
psDevice->ui64HWPerfFilter = bToggle ?
psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_);
+done_:
#if defined(DEBUG)
if (bToggle)
{
eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS);
PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
}
- else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)))
+ else
{
eError = PVRSRVDestroyHWPerfHostThread();
PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread");
IMG_UINT64 ui64Value)
{
PVRSRV_ERROR eError;
- PVRSRV_RGXDEV_INFO *psDeviceInfo;
PVR_UNREFERENCED_PARAMETER(psPrivate);
PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
- psDeviceInfo = psDeviceNode->pvDevice;
-
eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, IMG_FALSE, ui64Value);
if (eError != PVRSRV_OK)
{
(void *) HWPERF_FILTER_OPENGL_IDX);
}
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCountersKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters)
+{
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOG_RETURN_IF_FALSE(psConfiguredCounters != NULL, "psConfiguredCounters is invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ eError = RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXGetHWPerfCtl");
+
+ eError = PVRSRVRGXGetConfiguredHWPerfCounters(psDeviceNode,
+ psHWPerfCtl,
+ ui32BlockID,
+ psConfiguredCounters);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXGetConfiguredHWPerfCounters");
+
+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode);
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocksKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32ArrayLen,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs)
+{
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl;
+ IMG_UINT32 *pui32BlockIDs = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOG_RETURN_IF_FALSE(pui32BlockCount != NULL, "pui32BlockCount is invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ eError = RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXGetHWPerfCtl");
+
+ if (pui32EnabledBlockIDs != NULL)
+ {
+ pui32BlockIDs = OSAllocMem(sizeof(IMG_UINT32) * ui32ArrayLen);
+ if (pui32BlockIDs == NULL)
+ {
+ PVR_LOG_GOTO_WITH_ERROR("OSAllocMem", eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+ }
+
+ eError = PVRSRVRGXGetEnabledHWPerfBlocks(psDeviceNode,
+ psHWPerfCtl,
+ ui32ArrayLen,
+ pui32BlockCount,
+ pui32BlockIDs);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXGetEnabledHWPerfBlocks", Error);
+
+ if (pui32EnabledBlockIDs != NULL)
+ {
+ IMG_UINT32 i;
+ if (*pui32BlockCount > ui32ArrayLen)
+ {
+ *pui32BlockCount = 0;
+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks."));
+ PVR_LOG_GOTO_WITH_ERROR(__func__, eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+ else if (*pui32BlockCount < ui32ArrayLen)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "ui32ArrayLen greater than the number of enabled blocks."));
+ }
+
+ for (i = 0; i < *pui32BlockCount; i++)
+ {
+ pui32EnabledBlockIDs[i] = pui32BlockIDs[i];
+ }
+ }
+
+Error:
+ if (pui32BlockIDs != NULL)
+ {
+ OSFreeMem(pui32BlockIDs);
+ }
+
+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode);
+
+ return eError;
+}
+
static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
{
if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
psDevice = (PVRSRV_RGXDEV_INFO*) pvArg;
+ /* Handle the case where we may be being called as part of a multi-device
+ * initialisation sequence. If the bDevInit2Done flag is not yet set we can
+ * perform no action for this device. Simply return.
+ */
+ if (!psDevice->bDevInit2Done)
+ {
+ return;
+ }
+
/* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter
* before the host stream is opened for reading by a HWPerf client.
* Which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */
PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread");
}
+ RGXSRV_HWPERF_DEVICE_INFO_FEATURES(psDevice);
+
if (RGXHWPerfHostIsEventEnabled(psDevice, RGX_HWPERF_HOST_CLIENT_INFO))
{
// GCC throws -Werror=frame-larger-than error if the frame size is > 1024 bytes,
/* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */
if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to form HWPerf host stream name for device %d",
__func__,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID));
return PVRSRV_ERROR_INVALID_PARAMS;
}
_PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
}
-static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus)
-{
- switch (eDeviceHealthStatus)
- {
- case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
- case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK;
- case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING;
- case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD;
- case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT;
- default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
- }
-}
-
-static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
-{
- switch (eDeviceHealthReason)
- {
- case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE;
- case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED;
- case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING;
- case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS;
- case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
- case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED;
- case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING;
- case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING;
- case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;
- default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED;
- }
-}
-
-static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType,
- PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
- PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason,
- IMG_UINT8 *pui8Dest)
+static inline void _SetupHostDeviceInfoPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_DEV_INFO_EV eEvType,
+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *puPacketData,
+ IMG_UINT8 *pui8Dest)
{
RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR));
psData->eEvType = eEvType;
switch (eEvType)
{
case RGX_HWPERF_DEV_INFO_EV_HEALTH:
- psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus);
- psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason);
+ if (puPacketData != NULL)
+ {
+ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus =
+ puPacketData->sDeviceStatus.eDeviceHealthStatus;
+ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason =
+ puPacketData->sDeviceStatus.eDeviceHealthReason;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: puPacketData is invalid."));
+ }
+ break;
+ case RGX_HWPERF_DEV_INFO_EV_FEATURES:
+ {
+ PVRSRV_ERROR eError;
+ eError = RGXServerFeatureFlagsToHWPerfFlags(psRgxDevInfo,
+ &psData->uDevInfoDetail.sBVNC);
+ PVR_LOG_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags");
+ psData->uDevInfoDetail.sBVNC.ui32BvncKmFeatureFlags |=
+#if defined(RGX_FEATURE_HWPERF_ROGUE)
+ RGX_HWPERF_FEATURE_ROGUE_FLAG;
+#elif defined(RGX_FEATURE_HWPERF_VOLCANIC)
+ RGX_HWPERF_FEATURE_VOLCANIC_FLAG;
+#else
+ 0x0;
+#endif
+ }
break;
default:
// unknown type - this should never happen
case RGX_HWPERF_DEV_INFO_EV_HEALTH:
ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus);
break;
+ case RGX_HWPERF_DEV_INFO_EV_FEATURES:
+ ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sBVNC);
+ break;
default:
// unknown type - this should never happen
PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type"));
}
void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
- RGX_HWPERF_DEV_INFO_EV eEvType,
- PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
- PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+ RGX_HWPERF_DEV_INFO_EV eEvType,
+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *puData)
{
IMG_UINT8 *pui8Dest;
IMG_UINT32 ui32Ordinal;
if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
{
_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
- _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest);
+ _SetupHostDeviceInfoPacketData(psRgxDevInfo, eEvType, puData, pui8Dest);
_CommitHWPerfStream(psRgxDevInfo, ui32Size);
}
}
static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType,
- IMG_UINT32 ui32TotalMemoryUsage,
+ IMG_UINT64 ui64TotalMemoryUsage,
IMG_UINT32 ui32LivePids,
PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage,
IMG_UINT8 *pui8Dest)
switch (eEvType)
{
- case RGX_HWPERF_INFO_EV_MEM_USAGE:
- psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage;
+ case RGX_HWPERF_INFO_EV_MEM64_USAGE:
+ psData->uInfoDetail.sMemUsageStats.ui64TotalMemoryUsage = ui64TotalMemoryUsage;
if (psPerProcessMemUsage)
{
for (i = 0; i < ui32LivePids; ++i)
{
psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid;
- psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage;
- psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage;
+ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64KernelMemUsage = psPerProcessMemUsage[i].ui64KernelMemUsage;
+ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui64GraphicsMemUsage = psPerProcessMemUsage[i].ui64GraphicsMemUsage;
}
}
break;
}
static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType,
- IMG_UINT32 *pui32TotalMemoryUsage,
+ IMG_UINT64 *pui64TotalMemoryUsage,
IMG_UINT32 *pui32LivePids,
PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage)
{
switch (eEvType)
{
- case RGX_HWPERF_INFO_EV_MEM_USAGE:
+ case RGX_HWPERF_INFO_EV_MEM64_USAGE:
#if !defined(__QNXNTO__)
- if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK)
+ if (PVRSRVGetProcessMemUsage(pui64TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK)
{
- ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size)
- + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage)));
+ ui32Size += offsetof(RGX_HWPERF_HOST_INFO_DETAIL, sMemUsageStats.sPerProcessUsage)
+ + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DETAIL*)0)->sMemUsageStats.sPerProcessUsage));
}
#else
PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
IMG_UINT32 ui32Size;
IMG_UINT32 ui32Ordinal;
IMG_UINT64 ui64Timestamp;
- IMG_UINT32 ui32TotalMemoryUsage = 0;
+ IMG_UINT64 ui64TotalMemoryUsage = 0;
PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL;
IMG_UINT32 ui32LivePids = 0;
_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE);
_PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
- ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage);
+ ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui64TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage);
if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL)
{
_SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp);
- _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest);
+ _SetupHostInfoPacketData(eEvType, ui64TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest);
_CommitHWPerfStream(psRgxDevInfo, ui32Size);
}
PVR_DPF((PVR_DBG_WARNING,
"%s: HWPerf: Device not currently active. ID:%u",
__func__,
- psDeviceNode->sDevId.i32OsDeviceID));
+ psDeviceNode->sDevId.i32KernelDeviceID));
psDeviceNode = psDeviceNode->psNext;
continue;
}
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName),
- "hwperf_device_%d", psDeviceNode->sDevId.i32OsDeviceID) < 0)
+ "hwperf_device_%d", psDeviceNode->sDevId.i32KernelDeviceID) < 0)
{
OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to form HWPerf device name for device %d",
__func__,
- psDeviceNode->sDevId.i32OsDeviceID));
+ psDeviceNode->sDevId.i32KernelDeviceID));
return PVRSRV_ERROR_INVALID_PARAMS;
}
/* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d",
PVRSRV_TL_HWPERF_RGX_FW_STREAM,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to form HWPerf stream name for device %d",
__func__,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID));
return PVRSRV_ERROR_INVALID_PARAMS;
}
/* Open the RGX TL stream for reading in this session */
/* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */
if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0)
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID) < 0)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to form HWPerf host stream name for device %d",
__func__,
- psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID));
+ psRgxDevInfo->psDeviceNode->sDevId.i32KernelDeviceID));
return PVRSRV_ERROR_INVALID_PARAMS;
}
#include "connection_server.h"
#include "rgxdevice.h"
#include "rgx_hwperf.h"
+#include "rgx_fwif_hwperf.h"
/* HWPerf host buffer size constraints in KBs */
#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
void RGXHWPerfClientInitAppHintCallbacks(void);
+static INLINE PVRSRV_ERROR RGXAcquireHWPerfCtlCPUAddr(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL **ppsHWPerfCtl)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ (void**)ppsHWPerfCtl);
+
+ return eError;
+}
+
+static INLINE void RGXReleaseHWPerfCtlCPUAddr(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_LOG_RETURN_VOID_IF_FALSE(psDevNode != NULL, "psDevNode is invalid");
+
+ psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+ PVR_LOG_RETURN_VOID_IF_FALSE(psDevInfo != NULL, "psDevInfo invalid");
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+}
+
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCountersKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters);
+
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocksKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32ArrayLen,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs);
+
/******************************************************************************
* RGX HW Performance Profiling API(s)
*****************************************************************************/
* RGX HW Performance Host Stream API
*****************************************************************************/
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS
+RGXHWPerfConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus)
+{
+ switch (eDeviceHealthStatus)
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD;
+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT;
+ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED;
+ }
+}
+
+static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON
+RGXHWPerfConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason)
+{
+ switch (eDeviceHealthReason)
+ {
+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE;
+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED;
+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING;
+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING;
+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING;
+ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS;
+ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED;
+ }
+}
+
PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
RGX_HWPERF_DEV_INFO_EV eEvType,
- PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus,
- PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason);
+ RGX_HWPERF_HOST_DEV_INFO_DETAIL *psData);
void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
RGX_HWPERF_INFO_EV eEvType);
/**
* This macro checks if HWPerfHost and the event are enabled and if they are
- * it posts a device info event to the HWPerfHost stream.
+ * it posts a device info health event to the HWPerfHost stream.
*
* @param I Device info pointer
- * @param T Event type
- * @param H Health status enum
- * @param R Health reason enum
+ * @param H Health status enum
+ * @param R Health reason enum
*/
-#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \
- do { \
- if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \
- { \
- RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \
- } \
- } while (0)
+#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \
+ { \
+ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevDetail; \
+ uDevDetail.sDeviceStatus.eDeviceHealthStatus = RGXHWPerfConvDeviceHealthStatus(H); \
+ uDevDetail.sDeviceStatus.eDeviceHealthReason = RGXHWPerfConvDeviceHealthReason(R); \
+ RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_HEALTH, &uDevDetail); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks if HWPerfHost and the event are enabled and if they are
+ * it posts a device info features event to the HWPerfHost stream.
+ *
+ * @param I Device info pointer
+ */
+#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \
+ { \
+ RGXHWPerfHostPostDeviceInfo((I), RGX_HWPERF_DEV_INFO_EV_FEATURES, NULL); \
+ } \
+ } while (0)
/**
* This macro checks if HWPerfHost and the event are enabled and if they are
#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR)
#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z)
#define RGXSRV_HWPERF_CLK_SYNC(I)
-#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R)
+#define RGXSRV_HWPERF_DEVICE_INFO_HEALTH(I, H, R)
+#define RGXSRV_HWPERF_DEVICE_INFO_FEATURES(I)
#define RGXSRV_HWPERF_HOST_INFO(I, T)
#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D)
#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)
SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
ui32FenceSyncCheckpointCount,
apsFenceSyncCheckpoints);
- if (!pauiClientFenceUFOAddress)
- {
- pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
- }
+
+ pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+
ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
#if defined(KICKSYNC_CHECKPOINT_DEBUG)
{
*/
/*
- * We might only be kicking for flush out a padding packet so only submit
- * the command if the create was successful
+ * All the required resources are ready at this point, we can't fail so
+ * take the required server sync operations and commit all the resources
*/
- if (eError == PVRSRV_OK)
- {
- /*
- * All the required resources are ready at this point, we can't fail so
- * take the required server sync operations and commit all the resources
- */
- RGXCmdHelperReleaseCmdCCB(1,
- asCmdHelperData,
- "KickSync",
- FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
- }
+ RGXCmdHelperReleaseCmdCCB(1,
+ asCmdHelperData,
+ "KickSync",
+ FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
/* Construct the kernel kicksync CCB command. */
sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
/*
* Submit the kicksync command to the firmware.
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_SYNC,
+ RGX_HWPERF_KICK_TYPE2_SYNC,
iCheckFence,
iUpdateFence,
iUpdateTimeline,
OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
} END_LOOP_UNTIL_TIMEOUT();
- PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+ PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode,
ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_SYNC);
+ RGX_HWPERF_KICK_TYPE2_SYNC);
if (eError2 != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
"PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
eError));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
+ eError = eError2;
}
/*
PVRSRV_RGXDEV_INFO *psDevInfo;
}; /* SERVER_MMU_CONTEXT is typedef-ed in rgxmem.h */
-PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode,
- MMU_CONTEXT *psMMUContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate)
-{
- PVRSRV_ERROR eError;
- DLLIST_NODE *psNode, *psNext;
- RGXFWIF_KCCB_CMD sFlushInvalCmd;
- SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- IMG_UINT32 ui32kCCBCommandSlot;
-
- OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
-
- dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
- {
- SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
- if (psIter->psMMUContext == psMMUContext)
- {
- psServerMMUContext = psIter;
- }
- }
-
- OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
-
- if (! psServerMMUContext)
- {
- return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
- }
-
- /* Schedule the SLC flush command */
-#if defined(PDUMP)
- PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
- "Submit SLC flush and invalidate");
-#endif
- sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
- sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate;
- sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
- sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize;
- sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr;
- eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo,
- psServerMMUContext,
- &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext);
- if (eError != PVRSRV_OK)
- {
- return eError;
- }
-
- eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo,
- &sFlushInvalCmd,
- PDUMP_FLAGS_CONTINUOUS,
- &ui32kCCBCommandSlot);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)",
- eError));
- }
- else
- {
- /* Wait for the SLC flush to complete */
- eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)",
- eError));
- }
- }
-
- return eError;
-}
-
PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode,
MMU_CONTEXT *psMMUContext,
IMG_UINT64 ui64FBSCEntryMask)
psFWMemContext->uiBPHandlerAddr = 0;
psFWMemContext->uiBreakpointCtl = 0;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
{
IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
IMG_BOOL bOSidAxiProt;
/* no active memory context found with the given PC address.
* Check the list of most recently freed memory contexts.
*/
- IMG_UINT32 i;
+ const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1;
+ IMG_UINT32 i, j;
OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
/* iterate through the list of unregistered memory contexts
* from newest (one before the head) to the oldest (the current head)
*/
- i = gui32UnregisteredMemCtxsHead;
-
- do
+ for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0;
+ j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
+ i = (i - 1) & ui32Mask, j++)
{
- UNREGISTERED_MEMORY_CONTEXT *psRecord;
-
- i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
-
- psRecord = &gasUnregisteredMemCtxs[i];
+ UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];
if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
{
bRet = IMG_TRUE;
break;
}
- } while (i != gui32UnregisteredMemCtxsHead);
+ }
OSLockRelease(psDevInfo->hMMUCtxUnregLock);
-
}
return bRet;
for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0;
j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
- i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j++)
+ i = (i - 1) & ui32Mask, j++)
{
UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];
void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode);
void RGXMMUSyncPrimFree(void);
-PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode,
- MMU_CONTEXT *psMMUContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiLength,
- IMG_BOOL bInvalidate);
-
PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode,
MMU_CONTEXT *psMMUContext,
IMG_UINT64 ui64FBSCEntryMask);
if (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions &
psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions &
- OPTIONS_PDVFS_MASK)
+ OPTIONS_PDVFS_EN)
{
return IMG_TRUE;
}
/* Increasing frequency, change voltage first */
if (ui32CoreClockRate > ui32CoreClockRateCurrent)
{
- psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+ psDVFSDeviceCfg->pfnSetVoltage(psDevConfig->hSysData, psOpp->ui32Volt);
}
- psDVFSDeviceCfg->pfnSetFrequency(ui32CoreClockRate);
+ psDVFSDeviceCfg->pfnSetFrequency(psDevConfig->hSysData, ui32CoreClockRate);
/* Decreasing frequency, change frequency first */
if (ui32CoreClockRate < ui32CoreClockRateCurrent)
{
- psDVFSDeviceCfg->pfnSetVoltage(psOpp->ui32Volt);
+ psDVFSDeviceCfg->pfnSetVoltage(psDevConfig->hSysData, psOpp->ui32Volt);
}
PVRSRVDevicePostClockSpeedChange(psDevInfo->psDeviceNode, psDVFSDeviceCfg->bIdleReq, NULL);
RGXShaderReadHeader(psShaderFW, &sHeader);
+ if (sHeader.ui32Version != RGX_TQ_SHADERS_VERSION_PACK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unsupported TQ shaders version: %d != %d",
+ __func__, sHeader.ui32Version, RGX_TQ_SHADERS_VERSION_PACK));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ goto failed_firmware;
+ }
+
ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1;
PDUMPCOMMENT(psDeviceNode, "Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages);
eError = PhysmemNewRamBackedPMR(NULL,
psDeviceNode,
(IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
- (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
1,
1,
&ui32MappingTable,
eError = PhysmemNewRamBackedPMR(NULL,
psDeviceNode,
(IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
- (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE,
1,
1,
&ui32MappingTable,
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxtdmtransfer.c
+@Title Device specific TDM transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxhwperf.h"
+#include "ospvr_gputrace.h"
+#include "rgxshader.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
+#include "validation_soc.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#include "rgxtimerquery.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP 0
+
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* TDM-specific state embedded in a transfer context: the FW common context,
+ * its scheduling priority and, when buffer sync is compiled in, the buffer
+ * sync context used to resolve/create fences on sync PMRs.
+ */
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; /* FW common context for the TDM DM */
+ IMG_INT32 i32Priority; /* priority passed to FWCommonContextAllocate */
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext; /* per-context buffer sync state */
+#endif
+} RGX_SERVER_TQ_TDM_DATA;
+
+
+/* Server-side TDM transfer context. Created by
+ * PVRSRVRGXTDMCreateTransferContextKM and linked on the device's TDM context
+ * list (sTDMCtxtListHead) under hTDMCtxListLock.
+ */
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode; /* owning device node */
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc; /* optional FW framework buffer (may be NULL) */
+ DEVMEM_MEMDESC *psFWTransferContextMemDesc; /* RGXFWIF_FWTDMCONTEXT allocation */
+ IMG_UINT32 ui32Flags;
+ RGX_SERVER_TQ_TDM_DATA sTDMData; /* FW common context + priority (+ buffer sync) */
+ DLLIST_NODE sListNode; /* entry in psDevInfo->sTDMCtxtListHead */
+ SYNC_ADDR_LIST sSyncAddrListFence; /* scratch list of fence (check) UFO addresses */
+ SYNC_ADDR_LIST sSyncAddrListUpdate; /* scratch list of update UFO addresses */
+ POS_LOCK hLock; /* serialises submissions on this context */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WORKEST_HOST_DATA sWorkEstData; /* workload estimation bookkeeping */
+#endif
+};
+
+/*
+ * Create the FW common context for a TDM transfer context.
+ *
+ * When SUPPORT_BUFFER_SYNC is defined a buffer sync context is created first.
+ * CCB sizes fall back to the RGX_TDM_CCB_(MAX_)SIZE_LOG2 defaults when the
+ * caller passes zero. On failure everything created here is torn down and an
+ * error (never PVRSRV_OK) is returned.
+ */
+static PVRSRV_ERROR _CreateTDMTransferContext(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEM_MEMDESC * psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ SERVER_MMU_CONTEXT * psServerMMUContext,
+ DEVMEM_MEMDESC * psFWMemContextMemDesc,
+ IMG_INT32 i32Priority,
+ RGX_COMMON_CONTEXT_INFO * psInfo,
+ RGX_SERVER_TQ_TDM_DATA * psTDMData,
+ IMG_UINT32 ui32CCBAllocSizeLog2,
+ IMG_UINT32 ui32CCBMaxAllocSizeLog2,
+ IMG_UINT32 ui32ContextFlags,
+ IMG_UINT64 ui64RobustnessAddress)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psTDMData->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-tdm");
+ if (IS_ERR(psTDMData->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(psTDMData->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ /* CCB log2 sizes of 0 select the build-time TDM defaults */
+ eError = FWCommonContextAllocate(
+ psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_TDM,
+ RGXFWIF_DM_TDM,
+ psServerMMUContext,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ NULL,
+ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2,
+ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
+ ui32ContextFlags,
+ i32Priority,
+ UINT_MAX, /* max deadline MS */
+ ui64RobustnessAddress,
+ psInfo,
+ &psTDMData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ psTDMData->i32Priority = i32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+ psTDMData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * Tear down the TDM data created by _CreateTDMTransferContext.
+ *
+ * Returns PVRSRV_ERROR_RETRY when the FW has not yet finished with the
+ * common context; the caller must keep the context alive and retry later.
+ */
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+ RGX_SERVER_TQ_TDM_DATA * psTDMData,
+ PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(
+ psDeviceNode,
+ psTDMData->psServerCommonContext,
+ RGXFWIF_DM_TDM,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+ FWCommonContextFree(psTDMData->psServerCommonContext);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+ psTDMData->psBufferSyncContext = NULL;
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ *
+ * Create a server-side TDM transfer context: allocates the FW transfer
+ * context memory (with the TDM common context embedded in it), the optional
+ * FW framework buffer (when ui32FrameworkCommandSize != 0), the FW common
+ * context itself, and registers the new context on the device's TDM context
+ * list. On success *ppsTransferContext holds the new context; on failure it
+ * is NULL and every partially created resource has been freed.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_INT32 i32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_UINT32 ui32PackedCCBSizeU88,
+ IMG_UINT32 ui32ContextFlags,
+ IMG_UINT64 ui64RobustnessAddress,
+ RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
+ RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Allocate the server side structure */
+ *ppsTransferContext = NULL;
+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+ if (psTransferContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /*
+ Create the FW transfer context, this has the TDM common
+ context embedded within it
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWTDMCONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwTransferContext",
+ &psTransferContext->psFWTransferContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fwtransfercontext;
+ }
+
+ eError = OSLockCreate(&psTransferContext->hLock);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_lockcreate;
+ }
+
+ psTransferContext->psDeviceNode = psDeviceNode;
+
+ if (ui32FrameworkCommandSize)
+ {
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psTransferContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate firmware GPU framework state (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
+ psTransferContext->psFWFrameworkMemDesc,
+ pabyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to populate the framework buffer (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+ }
+
+ /* CCB sizes arrive packed as two U8 log2 values in ui32PackedCCBSizeU88 */
+ eError = _CreateTDMTransferContext(psConnection,
+ psDeviceNode,
+ psTransferContext->psFWTransferContextMemDesc,
+ offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext),
+ hMemCtxPrivData,
+ psFWMemContextMemDesc,
+ i32Priority,
+ &sInfo,
+ &psTransferContext->sTDMData,
+ U32toU8_Unpack1(ui32PackedCCBSizeU88),
+ U32toU8_Unpack2(ui32PackedCCBSizeU88),
+ ui32ContextFlags,
+ ui64RobustnessAddress);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tdmtransfercontext;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+ }
+#endif
+
+ SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+ *ppsTransferContext = psTransferContext;
+
+ return PVRSRV_OK;
+
+fail_tdmtransfercontext:
+fail_frameworkcopy:
+ if (psTransferContext->psFWFrameworkMemDesc != NULL)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ }
+fail_frameworkcreate:
+ OSLockDestroy(psTransferContext->hLock);
+fail_lockcreate:
+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+fail_fwtransfercontext:
+ OSFreeMem(psTransferContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ *ppsTransferContext = NULL;
+ return eError;
+}
+
+/*
+ * Hand back the PMRs containing the prebuilt TQ shader binaries (CLI and USC
+ * blocks) via PVRSRVTQAcquireShaders. psConnection is accepted for bridge
+ * uniformity but is not used here.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ PMR ** ppsCLIPMRMem,
+ PMR ** ppsUSCPMRMem)
+{
+ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Counterpart of PVRSRVRGXTDMGetSharedMemoryKM. Intentionally a no-op: the
+ * handle is simply discarded - presumably the shader PMRs are owned and
+ * freed by the device, not per caller (confirm against PVRSRVTQAcquireShaders).
+ */
+PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMRMem);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Destroy a TDM transfer context created by
+ * PVRSRVRGXTDMCreateTransferContextKM.
+ *
+ * May return PVRSRV_ERROR_RETRY, either because outstanding workload
+ * estimation CCB commands have not yet been received or because the FW has
+ * not finished with the common context; in that case the context is left
+ * intact (re-added to the device list if already removed) and the caller
+ * should retry later.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ RGXFWIF_FWTDMCONTEXT *psFWTransferContext;
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+ eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc,
+ (void **)&psFWTransferContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map firmware transfer context (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ return eError;
+ }
+
+ ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted;
+
+ DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc);
+
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+ __func__, ui32WorkEstCCBSubmitted,
+ psTransferContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+#endif
+
+
+ /* remove node from list before calling destroy - as destroy, if successful
+ * will invalidate the node
+ * must be re-added if destroy fails
+ */
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_remove_node(&(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+ eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+ psTransferContext->psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroyTDM;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
+ }
+#endif
+
+ if (psTransferContext->psFWFrameworkMemDesc)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ }
+
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
+
+ OSLockDestroy(psTransferContext->hLock);
+
+ OSFreeMem(psTransferContext);
+
+ return PVRSRV_OK;
+
+fail_destroyTDM:
+
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE * piUpdateFence,
+ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
+ IMG_UINT32 ui32FWCommandSize,
+ IMG_UINT8 * pui8FWCommand,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 * paui32SyncPMRFlags,
+ PMR ** ppsSyncPMRs,
+ IMG_UINT32 ui32TDMCharacteristic1,
+ IMG_UINT32 ui32TDMCharacteristic2,
+ IMG_UINT64 ui64DeadlineInus)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+ PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue;
+ IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount;
+ PVRSRV_ERROR eError;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
+ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext);
+ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+ IMG_UINT64 ui64FBSCEntryMask = 0;
+
+ IMG_BOOL bCCBStateOpen;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ IMG_UINT32 ui32CmdOffset = 0;
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0};
+ IMG_UINT32 ui32TDMWorkloadDataRO = 0;
+ IMG_UINT32 ui32TDMCmdHeaderOffset = 0;
+ IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0;
+ RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if !defined(SUPPORT_WORKLOAD_ESTIMATION)
+ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1);
+ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2);
+ PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus);
+#endif
+
+ /* Ensure we haven't been given a null ptr to
+ * update values if we have been told we
+ * have updates
+ */
+ if (ui32ClientUpdateCount > 0)
+ {
+ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
+ "paui32ClientUpdateValue NULL but "
+ "ui32ClientUpdateCount > 0",
+ PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Ensure the string is null-terminated (Required for safety).
+ * NOTE(review): the hard-coded index 31 assumes
+ * PVRSRV_SYNC_NAME_LENGTH == 32 - confirm, or index with
+ * PVRSRV_SYNC_NAME_LENGTH - 1 instead. */
+ szUpdateFenceName[31] = '\0';
+
+ if (ui32SyncPMRCount != 0)
+ {
+ if (!ppsSyncPMRs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ OSLockAcquire(psTransferContext->hLock);
+
+ /* We can't allocate the required amount of stack space on all consumer architectures */
+ psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+ if (psCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_allochelper;
+ }
+
+
+ /*
+ Init the command helper commands for all the prepares
+ */
+ {
+ IMG_CHAR *pszCommandName;
+ RGXFWIF_CCB_CMD_TYPE eType;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+ pszCommandName = "TQ-TDM";
+
+ if (ui32FWCommandSize == 0)
+ {
+ /* A NULL CMD for TDM is used to append updates to a non finished
+ * FW command. bCCBStateOpen is used in case capture range is
+ * entered on this command, to not drain CCB up to the Roff for this
+ * command, but the finished command prior to this.
+ */
+ bCCBStateOpen = IMG_TRUE;
+ eType = RGXFWIF_CCB_CMD_TYPE_NULL;
+ }
+ else
+ {
+ bCCBStateOpen = IMG_FALSE;
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+ }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
+#endif
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+ 0,
+ NULL,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFODevVarBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+ if (ui32SyncPMRCount)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ int err;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+ psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ &ui32BufferFenceSyncCheckpointCount,
+ &apsBufferFenceSyncCheckpoints,
+ &psBufferUpdateSyncCheckpoint,
+ &psBufferSyncData);
+ if (err)
+ {
+ switch (err)
+ {
+ case -EINTR:
+ eError = PVRSRV_ERROR_RETRY;
+ break;
+ case -ENOMEM:
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ break;
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ break;
+ }
+
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
+ }
+ goto fail_resolve_input_fence;
+ }
+
+ /* Append buffer sync fences */
+ if (ui32BufferFenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+ SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+ ui32BufferFenceSyncCheckpointCount,
+ apsBufferFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+ }
+
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+ 1,
+ &psBufferUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+ }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_resolve_input_fence;
+ }
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<32; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+ szUpdateFenceName,
+ iUpdateTimeline,
+ psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_create_output_fence;
+ }
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ if (paui32IntUpdateValue)
+ {
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ }
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the transfer context update list */
+ SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ /* Append the checks (from input fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr);
+
+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED)
+ /*
+ * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
+ * in other words, take the value and set it to zero afterwards.
+ * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
+ * as it must be ready at the time of context activation.
+ */
+ {
+ eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode,
+ FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext),
+ &ui64FBSCEntryMask);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
+ goto fail_invalfbsc;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
+ sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
+
+ /* Prepare workload estimation */
+ WorkEstPrepare(psDeviceNode->pvDevice,
+ &psTransferContext->sWorkEstData,
+ &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
+ eType,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickDataTransfer);
+ }
+#endif
+
+ /*
+ Create the command helper data for this command
+ */
+ RGXCmdHelperInitCmdCCB(psDevInfo,
+ psClientCCB,
+ ui64FBSCEntryMask,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ NULL,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32FWCommandSize,
+ pui8FWCommand,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr,
+ eType,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickDataTransfer,
+#else /* SUPPORT_WORKLOAD_ESTIMATION */
+ NULL,
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+ pszCommandName,
+ bCCBStateOpen,
+ psCmdHelper);
+ }
+
+ /*
+ Acquire space for all the commands in one go
+ */
+
+ eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdacquire;
+ }
+
+
+ /*
+ We should acquire the kernel CCB(s) space here as the schedule could fail
+ and we would have to roll back all the syncs
+ */
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded
+	*/
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+ }
+#endif
+ RGXCmdHelperReleaseCmdCCB(1,
+ psCmdHelper,
+ "TQ_TDM",
+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* The following is used to determine the offset of the command header containing
+ the workload estimation data so that can be accessed when the KCCB is read */
+ ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper);
+
+ ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and
+ * therefore would start at an offset of 0 rather than the current command
+ * offset */
+ if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck)
+ {
+ ui32TDMWorkloadDataRO = ui32CmdOffset;
+ }
+ else
+ {
+ ui32TDMWorkloadDataRO = 0;
+ }
+ }
+#endif
+
+ /*
+ Even if we failed to acquire the client CCB space we might still need
+ to kick the HW to process a padding packet to release space for us next
+ time round
+ */
+ {
+ RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+ IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
+ psTransferContext->sTDMData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel 3D CCB command. */
+ sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced
+ * when the KCCB command reaches the FW */
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset;
+ }
+#endif
+
+ /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+ /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+ /* ui323DCmdOffset); */
+ RGXSRV_HWPERF_ENQ(psTransferContext,
+ OSGetCurrentClientProcessIDKM(),
+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE2_TQTDM,
+ iCheckFence,
+ iUpdateFence,
+ iUpdateTimeline,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_TDM,
+ & sTDMKCCBCmd,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError));
+ goto fail_2dcmdacquire;
+ }
+
+ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWAddr, ui32ExtJobRef,
+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQTDM);
+ }
+
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined(NO_HARDWARE) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ * piUpdateFence = iUpdateFence;
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
+ psUpdateSyncCheckpoint, szUpdateFenceName);
+ }
+
+ OSFreeMem(psCmdHelper);
+
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+
+ OSLockRelease(psTransferContext->hLock);
+ return PVRSRV_OK;
+
+/*
+ No resources are created in this function so there is nothing to free
+ unless we had to merge syncs.
+ If we fail after the client CCB acquire there is still nothing to do
+ as only the client CCB release will modify the client CCB
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+#if defined(RGX_FBSC_INVALIDATE_COMMAND_SUPPORTED)
+fail_invalfbsc:
+#endif
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+fail_alloc_update_values_mem:
+
+/* fail_pdumpcheck: */
+/* fail_cmdtype: */
+
+ if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+
+fail_resolve_input_fence:
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_failed(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+fail_populate_sync_addr_list:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ OSFreeMem(psCmdHelper);
+fail_allochelper:
+
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ OSLockRelease(psTransferContext->hLock);
+ return eError;
+}
+
+
+/*
+ * Notify the firmware that the client CCB write offset of this TDM
+ * transfer context has been updated, prompting the FW to re-examine
+ * the CCB for newly available commands.
+ *
+ * psTransferContext - TDM transfer context whose CCB offset changed
+ * ui32PDumpFlags    - PDump capture flags forwarded to the kick
+ *
+ * Returns PVRSRV_OK on success, otherwise the RGXScheduleCommand error.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+	IMG_UINT32                 ui32PDumpFlags)
+{
+	RGXFWIF_KCCB_CMD  sKCCBCmd;
+	PVRSRV_ERROR eError;
+
+	/* Serialise against other operations on this transfer context */
+	OSLockAcquire(psTransferContext->hLock);
+
+	/* Schedule the firmware command */
+	sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+	sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+	/* KCCB may be temporarily full; retry until accepted or timeout */
+	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+	{
+		eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+		                            RGXFWIF_DM_TDM,
+		                            &sKCCBCmd,
+		                            ui32PDumpFlags);
+		if (eError != PVRSRV_ERROR_RETRY)
+		{
+			break;
+		}
+		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+	} END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to schedule the FW command %d (%s)",
+		         __func__, eError, PVRSRVGETERRORSTRING(eError)));
+	}
+
+	OSLockRelease(psTransferContext->hLock);
+	return eError;
+}
+
+/*
+ * Change the scheduling priority of a TDM transfer context.
+ *
+ * The priority is only pushed to the firmware (via ContextSetPriority)
+ * when it differs from the context's current value; a no-op request
+ * returns PVRSRV_OK without a FW round trip.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+                                                      PVRSRV_DEVICE_NODE * psDeviceNode,
+                                                      RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      IMG_INT32 i32Priority)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+	OSLockAcquire(psTransferContext->hLock);
+
+	/* Skip the FW update if the priority is unchanged */
+	if (psTransferContext->sTDMData.i32Priority != i32Priority)
+	{
+		eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+		                            psConnection,
+		                            psTransferContext->psDeviceNode->pvDevice,
+		                            i32Priority,
+		                            RGXFWIF_DM_TDM);
+		if (eError != PVRSRV_OK)
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));
+
+			OSLockRelease(psTransferContext->hLock);
+			return eError;
+		}
+	}
+
+	OSLockRelease(psTransferContext->hLock);
+	return PVRSRV_OK;
+}
+
+/*
+ * Set a property on a TDM transfer context.
+ *
+ * Currently only RGX_CONTEXT_PROPERTY_FLAGS is supported: ui64Input is
+ * truncated to 32 bits and applied to the FW common context under the
+ * context lock. Any other property returns PVRSRV_ERROR_NOT_SUPPORTED.
+ * pui64Output is unused by the supported property.
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+                                                      RGX_CONTEXT_PROPERTY eContextProperty,
+                                                      IMG_UINT64 ui64Input,
+                                                      IMG_UINT64 *pui64Output)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	switch (eContextProperty)
+	{
+		case RGX_CONTEXT_PROPERTY_FLAGS:
+		{
+			/* Only the low 32 bits of the input carry context flags */
+			IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
+
+			OSLockAcquire(psTransferContext->hLock);
+			eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext,
+			                                 ui32ContextFlags);
+			OSLockRelease(psTransferContext->hLock);
+			break;
+		}
+
+		default:
+		{
+			PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
+			eError = PVRSRV_ERROR_NOT_SUPPORTED;
+		}
+	}
+
+	return eError;
+}
+
+/*
+ * Debug helper: walk the device's list of TDM transfer contexts (under
+ * the TDM context list read lock) and dump each context's FW common
+ * context state through the supplied debug-printf callback.
+ */
+void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+                              DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+                              void *pvDumpDebugFile,
+                              IMG_UINT32 ui32VerbLevel)
+{
+	DLLIST_NODE *psNode, *psNext;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+/*
+ * Watchdog helper: scan every TDM transfer context on the device and
+ * report whether any client CCB is stalled.
+ *
+ * Returns a kick-type bitmask: RGX_KICK_TYPE_DM_TDM_2D if at least one
+ * context's CCB reports PVRSRV_ERROR_CCCB_STALLED, otherwise 0.
+ */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	DLLIST_NODE *psNode, *psNext;
+	IMG_UINT32 ui32ContextBitMask = 0;
+
+	OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+	dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+	{
+		RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+			IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(
+			psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
+			== PVRSRV_ERROR_CCCB_STALLED) {
+			ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+		}
+	}
+
+	OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+	return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+******************************************************************************/
PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
CONNECTION_DATA * psConnection,
PVRSRV_DEVICE_NODE * psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32FrameworkCommandSize,
IMG_PBYTE pabyFrameworkCommand,
IMG_HANDLE hMemCtxPrivData,
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDeviceNode,
RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
- IMG_UINT32 ui32Priority);
+ IMG_INT32 i32Priority);
PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
RGX_CONTEXT_PROPERTY eContextProperty,
* they represent the same current time sampled from different clock sources.
*/
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- PVR_DPF((PVR_DBG_ERROR,
- "_RGXMakeTimeCorrData: System Monotonic Clock not available."));
- PVR_ASSERT(0);
+ if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+ PVR_ASSERT(0);
+ }
}
#endif
psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
|| eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
psHeader->eCmdType = eCmdType;
- psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1);
+ psHeader->ui32CmdSize = PVR_ALIGN(sizeof(RGXFWIF_DEV_VIRTADDR), RGXFWIF_FWALLOC_ALIGN);
(*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
}
}
+/*
+ * Select the LMA allocation policy for a physical heap from its usage
+ * flags.
+ *
+ * Non-contiguous allocation is preferred whenever the OS can map
+ * non-contiguous physical memory, except for firmware heaps that must
+ * be premapped (Guest VZ heaps, all FW heaps under AutoVz when
+ * RGX_PREMAP_FW_HEAPS is defined, and FW_PREMAP heaps), which fall
+ * back to the default (contiguous) policy.
+ */
+PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags)
+{
+	PHYS_HEAP_POLICY ui32Policy;
+
+	if (OSIsMapPhysNonContigSupported())
+	{
+		ui32Policy = PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG;
+
+		if (BITMASK_ANY(ui32UsageFlags,
+						(PHYS_HEAP_USAGE_FW_SHARED |
+						 PHYS_HEAP_USAGE_FW_PRIVATE |
+						 PHYS_HEAP_USAGE_FW_PREMAP_PT |
+						 PHYS_HEAP_USAGE_FW_CODE |
+						 PHYS_HEAP_USAGE_FW_PRIV_DATA)))
+		{
+			if (PVRSRV_VZ_MODE_IS(GUEST))
+			{
+				/* Guest Firmware heaps are always premapped */
+				ui32Policy = PHYS_HEAP_POLICY_DEFAULT;
+			}
+#if defined(RGX_PREMAP_FW_HEAPS)
+			else if (PVRSRV_VZ_MODE_IS(HOST))
+			{
+				/* All Firmware heaps are premapped under AutoVz */
+				ui32Policy = PHYS_HEAP_POLICY_DEFAULT;
+			}
+#endif
+		}
+
+		/* Explicitly premapped heaps always use the default policy */
+		if (BITMASK_ANY(ui32UsageFlags, PHYS_HEAP_USAGE_FW_PREMAP))
+		{
+			ui32Policy = PHYS_HEAP_POLICY_DEFAULT;
+		}
+	}
+	else
+	{
+		/* Non-contiguous mapping unsupported: must allocate contiguously */
+		ui32Policy = PHYS_HEAP_POLICY_DEFAULT;
+	}
+
+	return ui32Policy;
+}
+
/******************************************************************************
End of file (rgxutils.c)
******************************************************************************/
******************************************************************************/
const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+/*************************************************************************/ /*!
+
+@Function RGXPhysHeapGetLMAPolicy
+
+@Description Returns the optimal LMA allocation policy based on a heap's
+ usage flags
+
+@Input ui32UsageFlags Flags specifying a heap's intended use
+
+@Return PHYS_HEAP_POLICY The recommended LMA policy
+
+*/ /**************************************************************************/
+PHYS_HEAP_POLICY RGXPhysHeapGetLMAPolicy(PHYS_HEAP_USAGE_FLAGS ui32UsageFlags);
+
#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) bitmask & eKickTypeDM ? RGXStringifyKickTypeDM(eKickTypeDM) : ""
/******************************************************************************
End of file (rgxutils.h)
if (psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions &
psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions &
- OPTIONS_WORKLOAD_ESTIMATION_MASK)
+ OPTIONS_WORKLOAD_ESTIMATION_EN)
{
return IMG_TRUE;
}
return IMG_FALSE;
}
-static inline IMG_UINT32 _WorkEstDoHash(IMG_UINT32 ui32Input)
+inline IMG_UINT32 _WorkEstDoHash(IMG_UINT32 ui32Input)
{
IMG_UINT32 ui32HashPart;
#if defined(SUPPORT_SOC_TIMER)
psDevConfig = psDevInfo->psDeviceNode->psDevConfig;
- PVR_LOG_RETURN_IF_FALSE(psDevConfig->pfnSoCTimerRead, "SoC timer not available", eError);
- ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData);
+ if (psDevConfig->pfnSoCTimerRead)
+ {
+ ui64CurrentSoCTime = psDevConfig->pfnSoCTimerRead(psDevConfig->hSysData);
+ }
+ else
+ {
+ /* Fallback to OS clock */
+ ui64CurrentSoCTime = 0;
+ }
#endif
eError = OSClockMonotonicus64(&ui64CurrentTime);
{
/* Rounding is done to reduce multiple deadlines with minor spread flooding the fw workload array. */
#if defined(SUPPORT_SOC_TIMER)
- IMG_UINT64 ui64TimeDelta = (ui64DeadlineInus - ui64CurrentTime) * SOC_TIMER_FREQ;
- psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta);
+ if (psDevConfig->pfnSoCTimerRead)
+ {
+ IMG_UINT64 ui64TimeDelta = (ui64DeadlineInus - ui64CurrentTime) * SOC_TIMER_FREQ;
+ psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64CurrentSoCTime + ui64TimeDelta);
+ }
+ else
+ {
+ psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus);
+ }
#else
psWorkEstKickData->ui64Deadline = ROUND_DOWN_TO_NEAREST_1024(ui64DeadlineInus);
#endif
psWorkloadCharacteristics->sTransfer.ui32Characteristic2,
*pui64CyclePrediction));
break;
+
default:
break;
}
"WorkEstRetire: Missing host data",
unlock_workest);
+ /* Skip if cycle data unavailable */
+ PVR_LOG_GOTO_IF_FALSE(psReturnCmd->ui32CyclesTaken,
+ "WorkEstRetire: Cycle data not available",
+ unlock_workest);
+
/* Retrieve/validate completed workload matching data */
psWorkloadMatchingData = psReturnData->psWorkloadMatchingData;
PVR_LOG_GOTO_IF_FALSE(psWorkloadMatchingData,
PVR_LOG(("WorkEstRetire: HASH_Insert failed"));
}
+#if defined(DEBUG)
+ /* Zero the current entry in the return data table.
+ * Helps detect invalid ReturnDataIndex values from the
+ * firmware before the hash table is corrupted. */
+ memset(psReturnData, 0, sizeof(WORKEST_RETURN_DATA));
+#endif
+
psWorkloadMatchingData->ui32HashArrayWO = (ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK;
OSLockRelease(psWorkloadMatchingData->psHashLock);
unlock_workest:
OSLockRelease(psDevInfo->hWorkEstLock);
- psWorkEstHostData->ui32WorkEstCCBReceived++;
+
+ PVR_ASSERT(psWorkEstHostData);
+ if (psWorkEstHostData)
+ {
+ psWorkEstHostData->ui32WorkEstCCBReceived++;
+ }
return PVRSRV_ERROR_INVALID_PARAMS;
}
-
-static void _WorkEstInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+void _WorkEstInit(PVRSRV_RGXDEV_INFO *psDevInfo,
WORKLOAD_MATCHING_DATA *psWorkloadMatchingData,
HASH_FUNC *pfnWorkEstHashFunc,
HASH_KEY_COMP *pfnWorkEstHashCompare)
psWorkloadMatchingData->psHashTable = psWorkloadHashTable;
}
-static void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo,
WORKLOAD_MATCHING_DATA *psWorkloadMatchingData)
{
HASH_TABLE *psWorkloadHashTable;
void WorkEstCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+void _WorkEstInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData,
+ HASH_FUNC *pfnWorkEstHashFunc,
+ HASH_KEY_COMP *pfnWorkEstHashCompare);
+
+void _WorkEstDeInit(PVRSRV_RGXDEV_INFO *psDevInfo,
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData);
+
+inline IMG_UINT32 _WorkEstDoHash(IMG_UINT32 ui32Input);
+
#endif /* RGXWORKEST_H */
+++ /dev/null
-/*************************************************************************/ /*!
-@File
-@Title RGX compute functionality
-@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-@Description Header for the RGX compute functionality
-@License Dual MIT/GPLv2
-
-The contents of this file are subject to the MIT license as set out below.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-Alternatively, the contents of this file may be used under the terms of
-the GNU General Public License Version 2 ("GPL") in which case the provisions
-of GPL are applicable instead of those above.
-
-If you wish to allow use of your version of this file only under the terms of
-GPL, and not to allow others to use your version of this file under the terms
-of the MIT license, indicate your decision by deleting the provisions above
-and replace them with the notice and other provisions required by GPL as set
-out in the file called "GPL-COPYING" included in this distribution. If you do
-not delete the provisions above, a recipient may use your version of this file
-under the terms of either the MIT license or GPL.
-
-This License is also included in this distribution in the file called
-"MIT-COPYING".
-
-EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/ /**************************************************************************/
-
-#if !defined(RGXCOMPUTE_H)
-#define RGXCOMPUTE_H
-
-#include "devicemem.h"
-#include "device.h"
-#include "rgxfwutils.h"
-#include "rgx_fwif_resetframework.h"
-#include "rgxdebug.h"
-#include "pvr_notifier.h"
-
-#include "sync_server.h"
-#include "sync_internal.h"
-#include "connection_server.h"
-
-
-typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXCreateComputeContextKM
-
- @Description
-
-@Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
- IMG_UINT32 ui32FrameworkRegisterSize,
- IMG_PBYTE pbyFrameworkRegisters,
- IMG_HANDLE hMemCtxPrivData,
- IMG_UINT32 ui32StaticComputecontextStateSize,
- IMG_PBYTE pStaticComputecontextState,
- IMG_UINT32 ui32PackedCCBSizeU88,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT64 ui64RobustnessAddress,
- IMG_UINT32 ui32MaxDeadlineMS,
- RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext);
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXDestroyComputeContextKM
-
- @Description
- Server-side implementation of RGXDestroyComputeContext
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXKickCDMKM
-
- @Description
- Server-side implementation of RGXKickCDM
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32ClientUpdateCount,
- SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock,
- IMG_UINT32 *paui32ClientUpdateSyncOffset,
- IMG_UINT32 *paui32ClientUpdateValue,
- PVRSRV_FENCE iCheckFence,
- PVRSRV_TIMELINE iUpdateTimeline,
- PVRSRV_FENCE *piUpdateFence,
- IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
- IMG_UINT32 ui32CmdSize,
- IMG_PBYTE pui8DMCmd,
- IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32SyncPMRCount,
- IMG_UINT32 *paui32SyncPMRFlags,
- PMR **ppsSyncPMRs,
- IMG_UINT32 ui32NumWorkgroups,
- IMG_UINT32 ui32NumWorkitems,
- IMG_UINT64 ui64DeadlineInus);
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXFlushComputeDataKM
-
- @Description
- Server-side implementation of RGXFlushComputeData
-
- @Input psComputeContext - Compute context to flush
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-/*!
-*******************************************************************************
-
- @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
- @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
-
- @Input psComputeContext - Compute context to flush
-
- @Return PVRSRV_ERROR
-
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32Priority);
-
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- RGX_CONTEXT_PROPERTY eContextProperty,
- IMG_UINT64 ui64Input,
- IMG_UINT64 *pui64Output);
-
-PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 *ui32Error);
-
-/* Debug - Dump debug info of compute contexts on this device */
-void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
- DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- IMG_UINT32 ui32VerbLevel);
-
-/* Debug/Watchdog - check if client compute contexts are stalled */
-IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
-
-#endif /* RGXCOMPUTE_H */
#include "fwtrace_string.h"
#include "rgxfwimageutils.h"
#include "fwload.h"
+#include "debug_common.h"
#include "rgxta3d.h"
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#include "rgxkicksync.h"
+#endif
#include "rgxcompute.h"
#include "rgxtransfer.h"
#include "rgxtdmtransfer.h"
PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \
PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \
(x).ui32DDKBuild, \
- ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+ ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? "debug":"release", \
(x).ui32BuildOptions);
#define DD_SUMMARY_INDENT ""
#undef X
};
-typedef struct _IMG_FLAGS2DESC_
-{
- IMG_UINT32 uiFlag;
- const IMG_CHAR *pszLabel;
-} IMG_FLAGS2DESC;
-
static const IMG_FLAGS2DESC asCswOpts2Description[] =
{
{RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"},
"DVG"
};
-#if !defined(SUPPORT_TRUSTED_DEVICE)
#if !defined(NO_HARDWARE)
/* Translation of MIPS exception encoding */
typedef struct _MIPS_EXCEPTION_ENCODING_
return apsMIPSExcCodes[ui32ExcCode].pszStr;
}
#endif
-#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
{
const IMG_CHAR * pszExplanation;
} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
-#if !defined(SUPPORT_TRUSTED_DEVICE)
#if !defined(NO_HARDWARE)
static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
{
{ (IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" }
};
#endif
-#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] =
{
};
#endif
+#if !defined(NO_HARDWARE)
static PVRSRV_ERROR
RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset,
IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask)
/* Read the value */
return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal);
}
+#endif /* !defined(NO_HARDWARE) */
#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
}
#endif
+#if !defined(NO_HARDWARE)
static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo)
return PVRSRV_OK;
#endif
}
+#endif /* !defined(NO_HARDWARE) */
#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo)
The function will query DevicememHistory for information about the faulting page, as well
as the page before and after.
+ @Input psDeviceNode - The device which this allocation search should be made on
@Input uiPID - The process ID to search for allocations belonging to
@Input sFaultDevVAddr - The device address to search for allocations at/before/after
@Input asQueryOut - Storage for the query results
@Return IMG_BOOL - IMG_TRUE if any results were found for this page fault
******************************************************************************/
-static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
IMG_UINT32 ui32PageSizeBytes)
{
sQueryIn.uiPID = uiPID;
}
+ sQueryIn.psDevNode = psDeviceNode;
/* Query the DevicememHistory for all allocations in the previous page... */
sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes;
if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING],
******************************************************************************/
static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ PVRSRV_DEVICE_NODE *psDevNode,
void *pvDumpDebugFile,
FAULT_INFO *psInfo,
const IMG_CHAR* pszIndent)
else
{
PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent);
+ DevicememHistoryDumpRecordStats(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ PVR_DUMPDEBUG_LOG("%s Records Searched -"
+ " PP:%"IMG_UINT64_FMTSPEC
+ " FP:%"IMG_UINT64_FMTSPEC
+ " NP:%"IMG_UINT64_FMTSPEC,
+ pszIndent,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING].ui64SearchCount,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED].ui64SearchCount,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT].ui64SearchCount);
}
}
else
{
/* get any DevicememHistory data for the faulting address */
- bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+ bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode,
+ sProcessInfo.uiPID,
sFaultDevVAddr,
psInfo->asQueryOut,
ui32PageSizeBytes);
-#if !defined(SUPPORT_TRUSTED_DEVICE)
#if !defined(NO_HARDWARE)
static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState)
{
void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
IMG_UINT32 ui32RegRead;
IMG_UINT32 eError = PVRSRV_OK;
- IMG_UINT32 *pui32NMIMemoryPointer;
IMG_UINT32 volatile *pui32SyncFlag;
- IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset;
-
- /* Map the FW data area to the kernel */
- eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
- (void **)&pui32NMIMemoryPointer);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to acquire NMI shared memory area (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto map_error_fail;
- }
-
- /* Calculate offset to the boot/NMI data page */
- uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA));
-
- /* Jump to the NMI shared data area within the page above */
- pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
/* Acquire the NMI operations lock */
OSLockAcquire(psDevInfo->hNMILock);
/* Make sure the synchronisation flag is set to 0 */
- pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET];
+ pui32SyncFlag = &psDevInfo->psRGXFWIfSysInit->sMIPSState.ui32Sync;
*pui32SyncFlag = 0;
/* Readback performed as a part of memory barrier */
ui32RegRead = 0;
/* Copy state */
- OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+ OSDeviceMemCopy(psMIPSState, &psDevInfo->psRGXFWIfSysInit->sMIPSState, sizeof(*psMIPSState));
--(psMIPSState->ui32ErrorEPC);
--(psMIPSState->ui32EPC);
fail:
/* Release the NMI operations lock */
OSLockRelease(psDevInfo->hNMILock);
- DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
-map_error_fail:
return eError;
}
const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode);
- if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET &&
+ if (ui32ErrorState != 0 &&
pszException != NULL)
{
PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
}
}
-#endif /* !defined(NO_HARDWARE) */
-#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause)
{
return NULL;
}
}
-
-/*
- Appends flags strings to a null-terminated string buffer - each flag
- description string starts with a space.
-*/
-static void _Flags2Description(IMG_CHAR *psDesc,
- IMG_UINT32 ui32DescSize,
- const IMG_FLAGS2DESC *psConvTable,
- IMG_UINT32 ui32TableSize,
- IMG_UINT32 ui32Flags)
-{
- IMG_UINT32 ui32Idx;
-
- for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
- {
- if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
- {
- OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
- }
- }
-}
+#endif /* !defined(NO_HARDWARE) */
/*
Writes flags strings to an uninitialised buffer.
OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
- _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
- _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
}
static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
- _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
}
}
else
{
- _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
asDmState2Description, ARRAY_SIZE(asDmState2Description),
ui32HWRRecoveryFlags);
}
OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
{
- PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
aui8RecoveryNum,
psHWRInfo->ui32CoreID,
psHWRInfo->ui32PID,
+ psHWRInfo->szProcName,
psHWRInfo->ui32FrameNum,
psHWRInfo->ui32ActiveHWRTData,
psHWRInfo->ui32EventStatus,
}
else
{
- PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ PVR_DUMPDEBUG_LOG(" %s PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
aui8RecoveryNum,
psHWRInfo->ui32PID,
+ psHWRInfo->szProcName,
psHWRInfo->ui32FrameNum,
psHWRInfo->ui32ActiveHWRTData,
psHWRInfo->ui32EventStatus,
sFaultDevVAddr.uiAddr <<= 4; /* align shift */
ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
-#if defined(SUPPORT_TRUSTED_DEVICE)
- ui32PC = ui32PC - 1;
-#endif
bPMFault = (ui32PC <= 8);
sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
}
if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
{
- _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
+ _PrintFaultInfo(pfnDumpDebugPrintf, psDevInfo->psDeviceNode, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
}
OSLockRelease(psDevInfo->hDebugFaultInfoLock);
IMG_CHAR *pszState, *pszReason;
const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
/* space for the current clock speed and 3 previous */
RGXFWIF_TIME_CORR asTimeCorrs[4];
#if !defined(NO_HARDWARE)
/* Determine the type virtualisation support used */
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation");
#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1))
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo);
}
#endif
-#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo);
if (!PVRSRV_VZ_MODE_IS(GUEST))
{
IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE];
- IMG_BOOL bOsIsolationEnabled = IMG_FALSE;
+ IMG_BOOL bDriverIsolationEnabled = IMG_FALSE;
+ IMG_UINT32 ui32HostIsolationGroup;
if (psFwSysData == NULL)
{
sHwrStateDescription[0] = '\0';
- _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
asHwrState2Description, ARRAY_SIZE(asHwrState2Description),
psFwSysData->ui32HWRStateFlags);
PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription);
PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)",
- pszPowStateName[psFwSysData->ePowState],
- (psDevInfo->pvAPMISRData)?"enabled":"disabled",
- psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqDenied,
- psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqRetry,
- psDevInfo->ui32ActivePMReqTotal -
- psDevInfo->ui32ActivePMReqOk -
- psDevInfo->ui32ActivePMReqDenied -
- psDevInfo->ui32ActivePMReqRetry -
- psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqTotal,
- psRuntimeCfg->ui32ActivePMLatencyms);
+ (psFwSysData->ePowState < ARRAY_SIZE(pszPowStateName) ? pszPowStateName[psFwSysData->ePowState] : "???"),
+ (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+ psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqRetry,
+ psDevInfo->ui32ActivePMReqTotal -
+ psDevInfo->ui32ActivePMReqOk -
+ psDevInfo->ui32ActivePMReqDenied -
+ psDevInfo->ui32ActivePMReqRetry -
+ psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqTotal,
+ psRuntimeCfg->ui32ActivePMLatencyms);
ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
asTimeCorrs[3].ui64OSTimeStamp);
}
- for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ ui32HostIsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID];
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
{
- RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+ RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID];
+ IMG_UINT32 ui32IsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID];
+ IMG_BOOL bMTSEnabled = IMG_FALSE;
- IMG_BOOL bMTSEnabled = (RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ?
- IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0);
+#if !defined(NO_HARDWARE)
+ if (bRGXPoweredON)
+ {
+ bMTSEnabled = (RGX_IS_BRN_SUPPORTED(psDevInfo, 64502) || !RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ?
+ IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32DriverID)) != 0);
+ }
+#endif
- PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid,
+ PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %u; Isolation group: %u; %s", ui32DriverID,
apszFwOsStateName[sFwRunFlags.bfOsState],
(sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok",
(sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "",
- psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid],
- (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "",
+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID],
+ ui32IsolationGroup,
(bMTSEnabled) ? "MTS on;" : "MTS off;"
);
- bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS;
+ if (ui32IsolationGroup != ui32HostIsolationGroup)
+ {
+ bDriverIsolationEnabled = IMG_TRUE;
+ }
}
#if defined(PVR_ENABLE_PHR)
IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE];
sPHRConfigDescription[0] = '\0';
- _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description),
BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode));
}
}
- if (bOsIsolationEnabled)
+ if (bDriverIsolationEnabled)
{
PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
}
#endif
}
+#if !defined(NO_HARDWARE)
static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo)
X(RGX_CR_META_SP_MSLVIRQENABLE) \
X(RGX_CR_META_SP_MSLVIRQLEVEL)
- IMG_UINT32 ui32Idx, ui32RegIdx;
+ IMG_UINT32 ui32Idx;
IMG_UINT32 ui32RegVal;
IMG_UINT32 ui32RegAddr;
#undef X
};
- const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38};
-
PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
- /* dump first set of Slave Port debug registers */
+ /* dump set of Slave Port debug registers */
for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
{
const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal);
}
-
- /* dump second set of Slave Port debug registers */
- for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
- ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
- PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
-
- }
-
- for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
- {
- ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
- for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
- ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
- PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
- }
- }
-
}
+#endif /* !defined(NO_HARDWARE) */
/*
* Array of all the Firmware Trace log IDs used to convert the trace data.
/* Print the decoded log for each thread... */
for (tid = 0; tid < RGXFW_THREAD_NUM; tid++)
{
- volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+ volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32WrapCount);
volatile IMG_UINT32 *pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
IMG_UINT32 ui32HostWrapCount = *pui32FWWrapCount;
}
}
-#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
-
- /* Print the power monitoring counters... */
- if (psFwSysData != NULL)
- {
- const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer;
- IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer;
- IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords;
- IMG_UINT32 ui32Count = 0;
- IMG_UINT64 ui64Timestamp;
-
- if (pui32TraceBuf == NULL)
- {
- /* power monitoring buffer not yet allocated */
- return;
- }
-
- if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER)
- {
- PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available."));
- return;
- }
- ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 |
- (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]);
-
- /* Update the trace pointer... */
- ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords;
- ui32Count = (ui32Count + 3);
-
- PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x",
- pui32TraceBuf,
- ui32TracePtr,
- ui32PowerMonBufSizeInDWords));
-
- while (ui32Count < ui32PowerMonBufSizeInDWords)
- {
- /* power monitoring data is (register, value) dword pairs */
- PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x",
- ui64Timestamp,
- pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]);
-
- if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID ||
- pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID)
- {
- /* end of buffer */
- break;
- }
-
- /* Update the trace pointer... */
- ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords;
- ui32Count = (ui32Count + 4);
- }
- }
-}
-#endif
-
static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
{
switch (eDevState)
{
- case PVRSRV_DEVICE_STATE_INIT:
+ case PVRSRV_DEVICE_STATE_CREATING:
+ return "Creating";
+ case PVRSRV_DEVICE_STATE_CREATED:
return "Initialising";
case PVRSRV_DEVICE_STATE_ACTIVE:
return "Active";
/* Helper macros to emit data */
#define REG32_FMTSPEC "%-30s: 0x%08X"
-#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECx
+#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX
#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R));
#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R));
#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
-#if !defined(SUPPORT_TRUSTED_DEVICE)
#if !defined(NO_HARDWARE)
+static RGX_MIPS_REMAP_ENTRY RGXDecodeMIPSRemap(IMG_UINT64 ui64RemapReg)
+{
+ RGX_MIPS_REMAP_ENTRY sRemapInfo;
+
+ sRemapInfo.ui32RemapAddrIn =
+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK)
+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT;
+
+ sRemapInfo.ui32RemapAddrOut =
+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK)
+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT;
+
+ sRemapInfo.ui32RemapRegionSize =
+ (ui64RemapReg & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK)
+ >> RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT;
+
+ return sRemapInfo;
+}
+
static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo)
{
IMG_UINT32 ui32Idx;
+ RGX_MIPS_REMAP_ENTRY *psMipsRemaps = NULL;
IMG_BOOL bCheckBRN63553WA =
RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) &&
IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+ if (bUseRemapRanges)
+ {
+ psMipsRemaps = OSAllocMem(sizeof(RGX_MIPS_REMAP_ENTRY) * RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES);
+ PVR_LOG_RETURN_VOID_IF_FALSE(psMipsRemaps != NULL, "psMipsRemaps alloc failed.");
+ }
+
PVR_DUMPDEBUG_LOG("TLB :");
for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++)
{
- RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL;
- RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL;
-
if (bUseRemapRanges)
{
- psRemapEntry0 = &sMIPSState.asRemap[ui32Idx];
- psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16];
+ psMipsRemaps[ui32Idx] =
+ RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx]);
+
+ psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] =
+ RGXDecodeMIPSRemap(sMIPSState.aui64Remap[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]);
}
_RGXMipsDumpTLBEntry(pfnDumpDebugPrintf,
pvDumpDebugFile,
&sMIPSState.asTLB[ui32Idx],
- psRemapEntry0,
- psRemapEntry1,
+ (bUseRemapRanges) ? &psMipsRemaps[ui32Idx] : NULL,
+ (bUseRemapRanges) ? &psMipsRemaps[ui32Idx+RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
ui32Idx);
if (bCheckBRN63553WA)
_CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf,
pvDumpDebugFile,
sMIPSState.asTLB,
- bUseRemapRanges ? sMIPSState.asRemap : NULL);
+ bUseRemapRanges ? psMipsRemaps : NULL);
if (bUseRemapRanges)
{
sMIPSState.ui32UnmappedAddress);
}
}
+
+ if (psMipsRemaps != NULL)
+ {
+ OSFreeMem(psMipsRemaps);
+ }
}
/* Check FW code corruption in case of known errors */
}
PVR_DUMPDEBUG_LOG("--------------------------------");
}
-#endif
-#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */
static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
return eError;
}
+#endif /* !defined(NO_HARDWARE) */
PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo)
{
+#if defined(NO_HARDWARE)
+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+ PVR_DUMPDEBUG_LOG("(Not supported for NO_HARDWARE builds)");
+
+ return PVRSRV_OK;
+#else /* !defined(NO_HARDWARE) */
IMG_UINT32 ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles;
IMG_UINT32 ui32RegVal;
DDLOG64(SLC_STATUS2);
}
- DDLOG32(SLC_CTRL_BYPASS);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+ {
+ DDLOG64(SLC_CTRL_BYPASS);
+ }
+ else
+ {
+ DDLOG32(SLC_CTRL_BYPASS);
+ }
DDLOG64(SLC_CTRL_MISC);
}
else
DDLOG64(MIPS_WRAPPER_CONFIG);
DDLOG32(MIPS_EXCEPTION_STATUS);
-#if defined(SUPPORT_TRUSTED_DEVICE)
- PVR_DUMPDEBUG_LOG("MIPS extra debug not available with SUPPORT_TRUSTED_DEVICE.");
-#elif !defined(NO_HARDWARE)
RGXDumpMIPSState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
-#endif
}
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
_RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
return eError;
+#endif /* defined(NO_HARDWARE) */
}
#undef REG32_FMTSPEC
}
if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
- (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED)))
{
PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
- (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount);
+ (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_DRIVERS_SUPPORTED), ui8FwOsCount);
}
PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
+ DevicememHistoryDumpRecordStats(psDevInfo->psDeviceNode, pfnDumpDebugPrintf, pvDumpDebugFile);
PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
psDevInfo->sDevFeatureCfg.ui32V,
psDevInfo->sDevFeatureCfg.ui32N,
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Dump out the Workload estimation CCB. */
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
+ /* Dump out the Workload estimation CCB. */
const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
if (psWorkEstCCBCtl != NULL)
{
PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt);
#if defined(RGX_FW_IRQ_OS_COUNTERS)
- if (ui32idx == RGXFW_HOST_OS)
+ if (ui32idx == RGXFW_HOST_DRIVER_ID)
#endif
{
PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]);
}
else
{
- PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+ PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------");
}
DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
-
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
-
+#endif
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
{
DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
*/
#if defined(RGX_FW_IRQ_OS_COUNTERS)
-#define for_each_irq_cnt(ui32idx) \
- for (ui32idx = 0; ui32idx < RGX_NUM_OS_SUPPORTED; ui32idx++)
+#define for_each_irq_cnt(ui32idx) FOREACH_SUPPORTED_DRIVER(ui32idx)
#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \
do { \
- extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \
+ extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS]; \
ui32Dest = PVRSRV_VZ_MODE_IS(GUEST) ? 0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \
} while (false)
" %u FW IRQ count = %u", ui32idx, ui32IrqCnt));
#if defined(RGX_FW_IRQ_OS_COUNTERS)
- if (ui32idx == RGXFW_HOST_OS)
+ if (ui32idx == RGXFW_HOST_DRIVER_ID)
#endif
{
PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u",
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo);
-#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- PVRSRV_RGXDEV_INFO *psDevInfo);
-#endif
-
#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
/*!
*******************************************************************************
******************************************************************************
* Global flags for driver validation
*****************************************************************************/
-#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */
-#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. Signatures must not match */
-#define RGX_VAL_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */
+#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0U) /*!< Not supported on Rogue cores */
+#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */
+#define RGX_VAL_WGP_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable WGP signature check. Signatures must match */
+#define RGX_VAL_WGP_SIG_CHECK_ERR_EN (0x20U) /*!< Enable WGP signature check. Signatures must not match */
+#define RGX_VAL_TRP_SIG_CHECK_NOERR_EN (0U) /*!< Not supported on Rogue cores */
+#define RGX_VAL_TRP_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */
+
typedef struct _GPU_FREQ_TRACKING_DATA_
{
IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */
IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */
IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+
+ IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS active statistic */
+ IMG_UINT64 aaui64DMOSStatBlocked[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS blocked statistic */
+ IMG_UINT64 aaui64DMOSStatIdle[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS idle statistic */
+ IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */
+
IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */
} RGXFWIF_GPU_UTIL_STATS;
RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */
DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */
- RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */
+ RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing km-firmware shared system data */
DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */
- RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */
+ RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing km-firmware shared os data */
#if defined(SUPPORT_TBI_INTERFACE)
DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */
DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc;
RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg;
- /* Additional guest firmware memory context info */
- DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED];
- DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED];
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ DEVMEM_MEMDESC *psRGXFWIfActiveContextBufDesc;
+ RGXFWIF_ACTIVE_CONTEXT_BUF_DATA *psRGXFWIfActiveContextBuf;
+#endif
+
+ /* Premapped firmware memory context info */
+ DEVMEM_HEAP *psPremappedFwRawHeap[RGX_NUM_DRIVERS_SUPPORTED];
+ DEVMEM_MEMDESC *psPremappedFwRawMemDesc[RGX_NUM_DRIVERS_SUPPORTED];
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
/* Array to store data needed for workload estimation when a workload
* and loss/freeing of FW & Host resources while in
* use in another thread e.g. MSIR. */
- IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
- IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */
- IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */
+ IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+ IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */
+ IMG_UINT32 ui32L2BufMaxPacketSize; /*!< Max allowed packet size in FW HWPerf TL (L2) buffer */
IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */
+ IMG_BOOL bHWPerfHasRun; /*! Flag to indicate that HWPerf was enabled. Used by FTrace
+ to determine if HWPerf has been enabled outside of the FTrace module. */
IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */
POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */
IMG_UINT32 ui32ECCRAMErrInjInterval;
#endif
- IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ PMR *psGenHeapSecMem; /*!< An allocation of secure memory mapped to
+ the general devmem heap. The allocation is
+ created and mapped at driver init. It's used for
+ various purposes. See rgx_fwif_km.h for all use cases. */
+#endif
} PVRSRV_RGXDEV_INFO;
#include "rgxmmudefs_km.h"
#include "rgxmipsmmuinit.h"
#include "rgxta3d.h"
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#include "rgxkicksync.h"
+#endif
#include "rgxutils.h"
#include "rgxtimecorr.h"
#include "sync_internal.h"
#include "sync_checkpoint_external.h"
#include "tlstream.h"
#include "devicemem_server_utils.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "rgx_bvnc_defs_km.h"
#include "info_page.h"
#endif
#if defined(RGX_FW_IRQ_OS_COUNTERS)
-const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS};
+const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OSIDS] = {IRQ_COUNTER_STORAGE_REGS};
#endif
/*
*/
#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP)
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000)
-#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (10800000)
#else
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (40000)
-#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000)
#endif
/* Workload Estimation Firmware CCB length */
static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
{
- /* ensure memory is flushed before kicking MTS */
+ /* Ensure any uncached/WC memory writes are flushed from CPU write buffers
+ * before kicking MTS.
+ */
OSWriteMemoryBarrier(NULL);
+ /* This should *NOT* happen. Trace what caused this and avoid a NULL
+ * pointer dereference in the Write/Read at the foot of the function.
+ */
+ PVR_ASSERT((psDevInfo != NULL));
+ if (psDevInfo == NULL)
+ {
+ return;
+ }
+
+ /* Kick MTS to wake firmware. */
OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
- /* ensure the MTS kick goes through before continuing */
-#if !defined(NO_HARDWARE) && !defined(INTEGRITY_OS)
- OSWriteMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + RGX_CR_MTS_SCHEDULE);
-#else
- OSWriteMemoryBarrier(NULL);
-#endif
+ /* Uncached device/IO mapping will ensure MTS kick leaves CPU, read back
+ * will ensure it reaches the regbank via inter-connects (AXI, PCIe etc)
+ * before continuing.
+ */
+ (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE);
}
/*************************************************************************/ /*!
IMG_UINT32 ui32CCBAllocSizeLog2,
IMG_UINT32 ui32CCBMaxAllocSizeLog2,
IMG_UINT32 ui32ContextFlags,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
IMG_UINT32 ui32FWCommonContextOffset;
IMG_UINT8 *pui8Ptr;
- IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
PVRSRV_ERROR eError;
/*
psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
psFWCommonContext->eDM = eDM;
+ BITMASK_SET(psFWCommonContext->ui32MiscFlags, RGXFWIF_CONTEXT_MISC_FLAGS_HAS_DEFER_COUNT);
/* Set the firmware CCB device addresses in the firmware common context */
eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
/* Store a references to Server Common Context and PID for notifications back from the FW. */
psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM();
+ OSCachedMemCopy(psFWCommonContext->szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN);
/* Set the firmware GPU context state buffer */
psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ psDeviceNode->sDevId.ui32InternalID,
ui32FWAddr);
}
#endif
return PVRSRV_ERROR_INVALID_PARAMS;
}
+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DLLIST_NODE *psNode)
+{
+ RGX_SERVER_COMMON_CONTEXT *psThisContext =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+ return FWCommonContextGetFWAddress(psThisContext);
+}
+
PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
IMG_UINT32 ui32ContextFlags)
{
OSFreeKMAppHintState(pvAppHintState);
pvAppHintState = NULL;
+ if (psTraceBufCtl->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS ||
+ psTraceBufCtl->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested trace buffer size (%u) out of its minimum (%u) & maximum (%u) range. Exiting with error.",
+ __func__,
+ psTraceBufCtl->ui32TraceBufSizeInDWords,
+ RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS,
+ RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS));
+ eError = PVRSRV_ERROR_OUT_OF_RANGE;
+ goto exit_error;
+ }
+
uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
fail:
RGXTraceBufferDeinit(psDevInfo);
+exit_error:
return eError;
}
"( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+#if defined(SUPPORT_VALIDATION)
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN);
+#endif /* defined(SUPPORT_VALIDATION) */
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
#if defined(SUPPORT_PDVFS)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
#endif /* defined(SUPPORT_PDVFS) */
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
PDUMP_FLAGS_CONTINUOUS);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+ "( PID filter PID/DriverID list (Up to %u entries. Terminate with a zero PID))",
RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
{
IMG_UINT32 i;
const IMG_DEVMEM_OFFSET_T uiPIDOff
= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);
- const IMG_DEVMEM_OFFSET_T uiOSIDOff
- = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+ const IMG_DEVMEM_OFFSET_T uiDriverIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32DriverID);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "(PID and OSID pair %u)", i);
+ "(PID and DriverID pair %u)", i);
PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)");
DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
0,
PDUMP_FLAGS_CONTINUOUS);
- PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)");
+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(DriverID)");
DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
- uiOSIDOff,
+ uiDriverIDOff,
0,
PDUMP_FLAGS_CONTINUOUS);
}
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
-#if defined(SUPPORT_PDVFS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
- IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
+#if defined(SUPPORT_PDVFS)
+ {
+ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
- /* Pro-active DVFS depends on Workload Estimation */
- psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
- psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
- PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
+ /* Pro-active DVFS depends on Workload Estimation */
+ psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
- if (psDVFSDeviceCfg->pasOPPTable != NULL)
- {
- if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+ if (psDVFSDeviceCfg->pasOPPTable != NULL)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: OPP Table too large: Size = %u, Maximum size = %lu",
- __func__,
- psDVFSDeviceCfg->ui32OPPTableSize,
- (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto fail;
- }
+ if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OPP Table too large: Size = %u, Maximum size = %lu",
+ __func__,
+ psDVFSDeviceCfg->ui32OPPTableSize,
+ (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail;
+ }
- OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
- psDVFSDeviceCfg->pasOPPTable,
- sizeof(psPDVFSOPPInfo->asOPPValues));
+ OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+ psDVFSDeviceCfg->pasOPPTable,
+ sizeof(psPDVFSOPPInfo->asOPPValues));
- psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+ psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
- ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+ }
}
- }
#endif /* defined(SUPPORT_PDVFS) */
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
/* FW trace control structure */
switch (ui32EnablePollOnChecksumErrorStatus)
{
case 0: /* no checking */ break;
- case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break;
- case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break;
+ case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_NOERR_EN; break;
+ case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_ERR_EN; break;
default:
PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
break;
psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
#endif /* defined(SUPPORT_TBI_INTERFACE) */
- /* Allocate shared buffer for GPU utilisation */
+ /* Allocate shared buffer for GPU utilisation.
+ * Enable FIRMWARE_CACHED to reduce read latency in the FW.
+ * The FW flushes the cache after any writes.
+ */
eError = RGXSetupFwAllocation(psDevInfo,
- RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) &
RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
sizeof(RGXFWIF_GPU_UTIL_FWCB),
"FwGPUUtilisationBuffer",
}
eError = RGXSetupFwAllocation(psDevInfo,
- RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+ RGX_FWCOMCTX_ALLOCFLAGS &
RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
ui32HWPerfCountersDataSize,
"FwHWPerfControlStructure",
#if defined(SUPPORT_SECURITY_VALIDATION)
{
PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS;
- PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags);
+ PVRSRV_SET_PHYS_HEAP_HINT(FW_PRIV_DATA, uiFlags);
PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test");
eError = DevmemFwAllocateExportable(psDeviceNode,
if (!psDeviceNode->bAutoVzFwIsUp)
{
- IMG_UINT32 ui32OSIndex;
+ IMG_UINT32 ui32DriverID;
RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
if (PVRSRV_VZ_MODE_IS(NATIVE))
{
- psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0;
+ psRuntimeCfg->aui32DriverPriority[RGXFW_HOST_DRIVER_ID] = 0;
+ psRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID] = RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP;
}
else
{
- for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++)
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
{
- const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] =
- {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY,
- RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY};
+ const IMG_INT32 ai32DefaultPriorities[RGXFW_MAX_NUM_OSIDS] =
+ {RGX_DRIVERID_0_DEFAULT_PRIORITY,
+#if (RGXFW_MAX_NUM_OSIDS > 1)
+ RGX_DRIVERID_1_DEFAULT_PRIORITY,
+#if (RGXFW_MAX_NUM_OSIDS > 2)
+ RGX_DRIVERID_2_DEFAULT_PRIORITY,
+ RGX_DRIVERID_3_DEFAULT_PRIORITY,
+ RGX_DRIVERID_4_DEFAULT_PRIORITY,
+ RGX_DRIVERID_5_DEFAULT_PRIORITY,
+ RGX_DRIVERID_6_DEFAULT_PRIORITY,
+ RGX_DRIVERID_7_DEFAULT_PRIORITY
+#if (RGXFW_MAX_NUM_OSIDS > 8)
+#error "Support for more than 8 OSIDs not implemented."
+#endif
+#endif
+#endif
+ };
+
+ const IMG_UINT32 aui32DefaultIsolationGroups[RGXFW_MAX_NUM_OSIDS] =
+ {RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP,
+#if (RGXFW_MAX_NUM_OSIDS > 1)
+ RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP,
+#if (RGXFW_MAX_NUM_OSIDS > 2)
+ RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP,
+#endif
+#endif
+ };
/* Set up initial priorities between different OSes */
- psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex];
+ psRuntimeCfg->aui32DriverPriority[ui32DriverID] = (IMG_UINT32)ai32DefaultPriorities[ui32DriverID];
+ psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = aui32DefaultIsolationGroups[ui32DriverID];
}
}
psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL;
/* Initialise GPU utilisation buffer */
- psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
- RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
+ {
+ IMG_UINT64 ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE);
+ RGXFWIF_DM eDM;
+
+ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64GpuLastWord = ui64LastWord;
+
+ for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++)
+ {
+ IMG_UINT32 ui32DriverID;
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psDevInfo->psRGXFWIfGpuUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID] = ui64LastWord;
+ }
+ }
+ }
/* init HWPERF data */
psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0;
psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0;
psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0;
+ /* Flush write buffers for psRGXFWIfFwSysData */
+ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwSysData);
+
/*Send through the BVNC Feature Flags*/
eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail);
sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Set up Workload Estimation firmware CCB */
- eError = RGXSetupCCB(psDevInfo,
- &psDevInfo->psWorkEstFirmwareCCBCtl,
- &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
- &psDevInfo->psWorkEstFirmwareCCB,
- &psDevInfo->psWorkEstFirmwareCCBMemDesc,
- &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
- &sFwOsInitScratch.psWorkEstFirmwareCCB,
- RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
- sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
- RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
- "FwWEstCCB");
- PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Set up Workload Estimation firmware CCB */
+ eError = RGXSetupCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+ &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
+ &sFwOsInitScratch.psWorkEstFirmwareCCB,
+ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+ "FwWEstCCB");
+ PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
/* Initialise the compatibility check data */
psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
}
#endif
+
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ if (psDevInfo->psRGXFWIfActiveContextBufDesc)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc);
+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc);
+ psDevInfo->psRGXFWIfActiveContextBufDesc = NULL;
+ }
+#endif
}
/*!
&psDevInfo->psFirmwareCCBMemDesc);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFreeCCB(psDevInfo,
- &psDevInfo->psWorkEstFirmwareCCBCtl,
- &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
- &psDevInfo->psWorkEstFirmwareCCB,
- &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ RGXFreeCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+ }
#endif
if (psDevInfo->psPowSyncPrim != NULL)
{
return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
}
- case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
+ case RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE:
case RGXFWIF_KCCB_CMD_WDG_CFG:
case RGXFWIF_KCCB_CMD_PHR_CFG:
case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
IMG_BOOL bGPUHasWorkWaiting;
bGPUHasWorkWaiting =
- (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
if (!bGPUHasWorkWaiting)
{
RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) ||
(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)),
{
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
{
- PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
{
- PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,pidTmp);
break;
}
}
PVRSRV_DEV_POWER_STATE ePowerState;
RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 };
PVRSRV_DEVICE_NODE *psDeviceNode;
- RGXFWIF_SYSDATA *psSysData;
+ RGXFWIF_SYSDATA *psFwSysData;
IMG_UINT32 ui32kCCBCommandSlot;
IMG_BOOL bWaitForFwUpdate = IMG_FALSE;
return PVRSRV_ERROR_INVALID_PARAMS;
}
psDeviceNode = psDevInfo->psDeviceNode;
- psSysData = psDevInfo->psRGXFWIfFwSysData;
+ psFwSysData = psDevInfo->psRGXFWIfFwSysData;
- if (NULL == psSysData)
+ if (NULL == psFwSysData)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Fw Sys Config is not mapped into CPU space", __func__));
ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
if (bSetNotClear)
{
- psSysData->ui32ConfigFlags |= ui32Config;
+ psFwSysData->ui32ConfigFlags |= ui32Config;
}
else
{
- psSysData->ui32ConfigFlags &= ~ui32Config;
+ psFwSysData->ui32ConfigFlags &= ~ui32Config;
}
+ OSWriteMemoryBarrier(&psFwSysData->ui32ConfigFlags);
/* return current/new value to caller */
if (pui32ConfigState)
{
- *pui32ConfigState = psSysData->ui32ConfigFlags;
+ *pui32ConfigState = psFwSysData->ui32ConfigFlags;
}
- OSMemoryBarrier(&psSysData->ui32ConfigFlags);
+ OSMemoryBarrier(&psFwSysData->ui32ConfigFlags);
eError = PVRSRVPowerLock(psDeviceNode);
PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock");
PDUMP_FLAGS_CONTINUOUS);
}
-PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid,
+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+ /*
+ * Point catbase-0 (the FW MMU context) at unmapped memory to make
+ * the FW crash from its own memory context.
+ */
+ RGXWriteKernelMMUPC64(&psDevInfo->sLayerParams,
+ FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV),
+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT,
+ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT,
+ ((0xDEADBEEF
+ >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT)
+ & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32DriverID,
RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
{
PVRSRV_ERROR eError = PVRSRV_OK;
RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
- sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32DriverID = ui32DriverID;
sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
#if defined(SUPPORT_AUTOVZ)
/* Guests and Host going offline should wait for confirmation
* from the Firmware of the state change. If this fails, break
* the connection on the OS Driver's end as backup. */
- if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS))
+ if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32DriverID == RGXFW_HOST_DRIVER_ID))
{
LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2)
{
else if (psFwSysData)
{
const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags =
- (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+ (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID];
/* Attempt several times until the FW manages to offload the OS */
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
/* read the OS state */
OSMemoryBarrier(NULL);
- /* check if FW finished offloading the OSID and is stopped */
+ /* check if FW finished offloading the driver and is stopped */
if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)
{
eError = PVRSRV_OK;
return eError;
}
-PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32Priority)
-{
- PVRSRV_ERROR eError;
- RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 };
-
- PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
-
- sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
- psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority;
- OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]);
-
-#if defined(PDUMP)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid);
- DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
- offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)),
- ui32Priority ,
- PDUMP_FLAGS_CONTINUOUS);
-#endif
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError = RGXScheduleCommand(psDevInfo,
- RGXFWIF_DM_GP,
- &sOSidPriorityCmd,
- PDUMP_FLAGS_CONTINUOUS);
- if (eError != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- return eError;
-}
-
PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
CONNECTION_DATA *psConnection,
PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGXFWIF_DM eDM)
{
IMG_UINT32 ui32CmdSize;
RGXFWIF_CCB_CMD_HEADER *psCmdHeader;
RGXFWIF_CMD_PRIORITY *psCmd;
PVRSRV_ERROR eError;
- IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext);
eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor);
sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
{
ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
-
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
-
+#endif
if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
{
ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
/* Add up the total number of interrupts issued, sampled/received and missed... */
#if defined(RGX_FW_IRQ_OS_COUNTERS)
/* Only the Host OS has a sample count, so only one counter to check. */
- ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_OS];
- ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_OS]);
+ ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_DRIVER_ID];
+ ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_DRIVER_ID]);
#else
IMG_UINT32 ui32Index;
_RGXUpdateHealthStatus_Exit:
OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
- RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);
+ RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo, eNewStatus, eNewReason);
/*
* Attempt to service the HWPerf buffer to regularly transport idle/periodic
}
else
{
- /* Otherwise, only dump first stalled command in the CCB */
- DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
+ /* Otherwise, only dump first command in the CCB */
+ DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr,
psCurrentServerCommonContext->psClientCCB,
pfnDumpDebugPrintf,
pvDumpDebugFile);
psHWRInfoBuf->ui32WriteIndex = 0;
psHWRInfoBuf->ui32DDReqCount = 0;
+ OSWriteMemoryBarrier(&psHWRInfoBuf->ui32DDReqCount);
+
return PVRSRV_OK;
}
@Function RGXFwRawHeapAllocMap
- @Description Register firmware heap for the specified guest OSID
+ @Description Register firmware heap for the specified driver
@Input psDeviceNode - device node
- @Input ui32OSID - Guest OSID
+ @Input ui32DriverID - Guest driver ID
@Input sDevPAddr - Heap address
@Input ui64DevPSize - Heap size
******************************************************************************/
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DriverID,
IMG_DEV_PHYADDR sDevPAddr,
IMG_UINT64 ui64DevPSize)
{
IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH];
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
- PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID));
- PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32DriverID));
+ PHYS_HEAP_CONFIG *psFwHeapConfig = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
PHYS_HEAP_CONFIG sFwHeapConfig;
- PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);
+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
- if (psFwMainConfig == NULL)
+ if (psFwHeapConfig == NULL)
{
PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found."));
return PVRSRV_ERROR_NOT_SUPPORTED;
}
- OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+ OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
if (!ui64DevPSize ||
!sDevPAddr.uiAddr ||
- ui32OSID >= RGX_NUM_OS_SUPPORTED ||
+ ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED ||
ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
{
PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- sFwHeapConfig = *psFwMainConfig;
+ sFwHeapConfig = *psFwHeapConfig;
sFwHeapConfig.sStartAddr.uiAddr = 0;
sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr;
sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA;
+ sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP;
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
- PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID);
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwHeapConfig.ui32UsageFlags),
+ &sFwHeapConfig,
+ szRegionRAName,
+ &psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32DriverID);
- eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
- PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID);
+ eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32DriverID);
- psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID];
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID] = psDeviceNode->apsFWPremapPhysHeap[ui32DriverID];
- PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);
+ PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for DriverID: [%d]", ui32DriverID);
-#if (RGX_NUM_OS_SUPPORTED > 1)
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
/* don't clear the heap of other guests on allocation */
- uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
+ uiRawFwHeapAllocFlags &= (ui32DriverID > RGXFW_HOST_DRIVER_ID) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
#endif
/* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */
if (psDeviceNode->bAutoVzFwIsUp)
{
uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE);
}
eError = DevmemFwAllocate(psDevInfo,
RGX_FIRMWARE_RAW_HEAP_SIZE,
uiRawFwHeapAllocFlags,
- psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
- &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+ psDevInfo->psPremappedFwRawHeap[ui32DriverID]->pszName,
+ &psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]);
PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
/* Mark this devmem heap as premapped so allocations will not require device mapping. */
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE);
- if (ui32OSID == RGXFW_HOST_OS)
+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID)
{
/* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly
* No memory allocated from these sub-heaps will be individually mapped into the device's
@Function RGXFwRawHeapUnmapFree
- @Description Unregister firmware heap for the specified guest OSID
+ @Description Unregister firmware heap for the specified guest driver
@Input psDeviceNode - device node
- @Input ui32OSID - Guest OSID
+@Input ui32DriverID - ID of the driver whose premapped firmware heap is released
******************************************************************************/
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID)
+ IMG_UINT32 ui32DriverID)
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ /* Tear-down counterpart of RGXFwRawHeapAllocMap(): lift the premap status
+ * first so the heap may actually be unmapped, then free the backing memory. */
/* remove the premap status, so the heap can be unmapped and freed */
- if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
+ if (psDevInfo->psPremappedFwRawHeap[ui32DriverID])
{
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_FALSE);
}
+ /* NOTE(review): both pointers are NULL-checked and the descriptor is reset
+ * below, so calling this twice for the same driver ID is a harmless no-op. */
- if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+ if (psDevInfo->psPremappedFwRawMemDesc[ui32DriverID])
{
- DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
- psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]);
+ psDevInfo->psPremappedFwRawMemDesc[ui32DriverID] = NULL;
}
}
MMU_FAULT_DATA sFaultData = {0U};
MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx;
IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX);
- IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
- IMG_UINT32 ui32OSID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
+ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ IMG_UINT32 ui32DriverID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
IMG_UINT32 ui32HeapId;
PHYS_HEAP *psPhysHeap;
OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1));
- PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED),
+ PVR_LOG_GOTO_IF_INVALID_PARAM((ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED),
eError, ErrorExit);
PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) ||
(ui32FwVA < ui32FwHeapEnd)),
eError, ErrorExit);
- ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ?
- PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID);
+ ui32HeapId = (ui32DriverID == RGXFW_HOST_DRIVER_ID) ?
+ PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID);
psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId];
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
(ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK));
/* Only the Host's Firmware heap is present in the Host's CPU IPA space */
- if (ui32OSID == RGXFW_HOST_OS)
+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID)
{
PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA);
}
return eError;
}
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+*******************************************************************************
+@Function RGXIsValidWorkloadEstCCBCommand
+
+@Description Checks if command type can be used for workload estimation
+
+@Input eType Command type to check
+
+
+@Return IMG_BOOL
+******************************************************************************/
+INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType)
+{
+ switch (eType)
+ {
+ /* Only these kick command types participate in workload estimation:
+ * geometry, 3D, compute (CDM) and TDM transfer. */
+ case RGXFWIF_CCB_CMD_TYPE_GEOM:
+ case RGXFWIF_CCB_CMD_TYPE_3D:
+ case RGXFWIF_CCB_CMD_TYPE_CDM:
+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM:
+ return IMG_TRUE;
+ default:
+ /* Reaching here indicates a caller bug. PVR_ASSERT is a no-op unless
+ * assertions are enabled in the build, so release builds simply
+ * report the type as invalid. */
+ PVR_ASSERT(IMG_FALSE);
+ return IMG_FALSE;
+ }
+}
+#endif
+
/******************************************************************************
End of file (rgxfwutils.c)
******************************************************************************/
#include "devicemem_utils.h"
#include "rgxmem.h"
-#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawDriverID%d" /*!< RGX Raw Firmware Heap identifier */
static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
PVRSRV_MEMALLOCFLAGS_T *puiFlags,
switch (ePhysHeap)
{
-#if defined(SUPPORT_SECURITY_VALIDATION)
- /* call with GPU_SECURE from RGXSetupFwSysData */
- case PVRSRV_PHYS_HEAP_GPU_SECURE:
-#endif
case PVRSRV_PHYS_HEAP_FW_CODE:
case PVRSRV_PHYS_HEAP_FW_PRIV_DATA:
case PVRSRV_PHYS_HEAP_FW_MAIN:
case PVRSRV_PHYS_HEAP_FW_PREMAP6:
case PVRSRV_PHYS_HEAP_FW_PREMAP7:
{
- IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
+ IMG_UINT32 ui32DriverID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
- *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED, "ui32DriverID");
+ *ppsFwHeap = psDevInfo->psPremappedFwRawHeap[ui32DriverID];
break;
}
default:
PVR_DPF_RETURN_RC(eError);
}
-#define MIPS_CACHE_LINE_SIZE_IN_BYTES 16
- uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ?
- (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) :
-/*
- * Aligning fw based allocations for MIPS based rogue cores at cache line boundary(16 bytes) instead of SLC(64 bytes)
- * to have more compact memory with less wastage and hopefully save some tlb misses.
- */
- (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? MIPS_CACHE_LINE_SIZE_IN_BYTES
- : GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)));
+ if (psFwHeap == psDevInfo->psFirmwareConfigHeap)
+ {
+ /*
+ * All structures allocated from the Firmware Config subheap must start at the same pre-determined
+ * offsets, regardless of the system's page size (e.g. 4k,16k,64k). The alignment requirement is
+ * satisfied for virtual addresses during the mapping stage. Physical allocations do not take
+ * alignment into consideration.
+ * VZ drivers usually preallocate and premap the entire Firmware heap range. Any allocations from
+ * this heap are physical alloc only, having their device VAs derived from their PAs. This makes
+ * it impossible to fulfil alignment requirements.
+ * To work around this limitation, allocation sizes are rounded to the nearest multiple of 64kb,
+ * regardless of the actual size of object.
+ */
+ uiAlign = RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY;
+
+ uiSize = PVR_ALIGN(uiSize, RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY);
+ }
+ else
+ {
+ /* Aligning fw based allocations for MIPS based rogue cores at cache line boundary(16 bytes) instead
+ of SLC(64 bytes) to have more compact memory with less waste and hopefully save some tlb misses. */
+ #define MIPS_CACHE_LINE_SIZE_IN_BYTES 16
+ uiAlign = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? MIPS_CACHE_LINE_SIZE_IN_BYTES
+ : GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+ }
eError = DevmemAllocateAndMap(psFwHeap,
uiSize,
is set
*/
eError = DevmemMapToDevice(*ppsMemDescPtr,
- psDevInfo->psFirmwareMainHeap,
+ psFwHeap,
&sTmpDevVAddr);
if (eError != PVRSRV_OK)
{
static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
@Input ui32CCBAllocSizeLog2 Size of the CCB for this context
@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context
@Input ui32ContextFlags Flags which specify properties of the context
-@Input ui32Priority Priority of the context
+@Input i32Priority Priority of the context
@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run
@Input ui64RobustnessAddress Address for FW to signal a context reset
@Input psInfo Structure that contains extra info
IMG_UINT32 ui32CCBAllocSizeLog2,
IMG_UINT32 ui32CCBMaxAllocSizeLog2,
IMG_UINT32 ui32ContextFlags,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
SERVER_MMU_CONTEXT *psServerMMUContext,
PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr);
+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DLLIST_NODE *psNode);
+
PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
IMG_UINT32 ui32ContextFlags);
/*!
PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
CONNECTION_DATA *psConnection,
PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGXFWIF_DM eDM);
/*!
PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
IMG_UINT32 ui32HCSDeadlineMs);
-/*!
-*******************************************************************************
-@Function RGXFWChangeOSidPriority
-
-@Description Requests the Firmware to change the priority of an operating
- system. Higher priority number equals higher priority on the
- scheduling system.
-
-@Input psDevInfo pointer to device info
-@Input ui32OSid The OSid whose priority is to be altered
-@Input ui32Priority The new priority number for the specified OSid
-
-@Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32Priority);
-
/*!
*******************************************************************************
@Function RGXFWHealthCheckCmd
@Description Requests the Firmware to change the guest OS Online states.
This should be initiated by the VMM when a guest VM comes
online or goes offline. If offline, the FW offloads any current
- resource from that OSID. The request is repeated until the FW
- has had time to free all the resources or has waited for
+ resource from that DriverID. The request is repeated until the
+ FW has had time to free all the resources or has waited for
workloads to finish.
@Input psDevInfo pointer to device info
-@Input ui32OSid The Guest OSid whose state is being altered
+@Input ui32DriverID The driver whose state is being altered
@Input eOSOnlineState The new state (Online or Offline)
@Return PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32DriverID,
RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
#if defined(SUPPORT_AUTOVZ)
Otherwise, a PVRSRV error code
******************************************************************************/
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DriverID,
IMG_DEV_PHYADDR sDevPAddr,
IMG_UINT64 ui64DevPSize);
@Description Unregister and unmap from device, a raw firmware physheap
******************************************************************************/
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID);
+ IMG_UINT32 ui32DriverID);
/*!
*******************************************************************************
IMG_DEV_PHYADDR *psDevPA,
IMG_UINT64 *pui64RawPTE);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+*******************************************************************************
+@Function RGXIsValidWorkloadEstCCBCommand
+
+@Description Checks if command type can be used for workload estimation
+
+@Input eType Command type to check
+
+@Return IMG_BOOL
+******************************************************************************/
+INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType);
+
+#endif
+
+/*!
+*******************************************************************************
+@Function RGXFWInjectFault
+
+@Description Injecting firmware fault to validate recovery through Host
+
+@Input psDevInfo Pointer to device info
+
+@Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo);
+
#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ)
#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers."
#endif
#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val)
#endif /* defined(SUPPORT_AUTOVZ) */
-#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)))
+#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED == 1)))
/* native, static-vz and AutoVz using shared memory */
#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState)
#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState)
#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE)
#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE)
#define KM_SET_OS_CONNECTION(val, psDevInfo)
-#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED == 1) */
#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
-#if defined(SUPPORT_AUTOVZ)
-#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS
+#if defined(RGX_PREMAP_FW_HEAPS)
+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_HOST_DRIVER_ID
#else
-#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START
+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_GUEST_DRIVER_ID_START
#endif
#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val)
static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
"FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
-static inline IMG_UINT32
-RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp,
- IMG_UINT32 ui32AllowedSize,
- RGX_PHWPERF_V2_PACKET_HDR psCurPkt )
-{
- IMG_UINT32 sizeSum = 0;
-
- /* Traverse the array to find how many packets will fit in the available space. */
- while ( sizeSum < ui32BytesExp &&
- sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize )
- {
- sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
- psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
- }
-
- return sizeSum;
-}
-
-static inline void
-RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo,
- IMG_BOOL bIsReaderConnected)
-{
- if (!bIsReaderConnected)
- {
- PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full "
- "and no reader is currently connected, suspending event collection. "
- "Connect a reader or restart driver to avoid event loss.", __func__));
- psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE;
- }
-}
-
/******************************************************************************
* RGX HW Performance Profiling Server API(s)
*****************************************************************************/
}
#ifdef SUPPORT_WORKLOAD_ESTIMATION
- /* Not a part of BVNC feature line and so doesn't need the feature supported check */
- psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Not a part of BVNC feature line and so doesn't need the feature supported check */
+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+ }
#endif
/* Define the HW counter block counts. */
OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks));
}
+ /* The GPU core count is overwritten by the FW */
+ psBVNC->ui16BvncGPUCores = 0;
+
return PVRSRV_OK;
}
return PVRSRV_OK;
}
+/*************************************************************************/ /*!
+@Function GetHWPerfBlockTypeByID
+@Description Lookup function to obtain a block type descriptor for a given
+ counter block identifier.
+@Input psDevInfo A pointer to current device info.
+@Input ui32BlockID The block ID for which a type
+ descriptor should be retrieved.
+@Return RGXFW_HWPERF_CNTBLK_TYPE_MODEL Block type descriptor.
+*/ /**************************************************************************/
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *
+GetHWPerfBlockTypeByID(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32BlockID)
+{
+ IMG_UINT32 ui32CntBlkModelLen;
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+ IMG_UINT32 ui32TableIdx = 0xFFFF;
+ RGX_HWPERF_CNTBLK_RT_INFO sRtInfo; /* Only used to satisfy pfnIsBlkPresent requirements. */
+
+ /* Strip the unit-select (and, for unified HWPerf, direct-addressable)
+ * modifier bits so only the base block identifier is looked up. */
+#if defined(HWPERF_UNIFIED)
+ IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~(RGX_CNTBLK_ID_UNIT_ALL_MASK|RGX_CNTBLK_ID_DA_MASK));
+#else
+ IMG_UINT32 uiBlockID = (IMG_UINT32)(ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK);
+#endif
+
+ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+
+ /* Is it a direct block? */
+ if (uiBlockID < RGX_CNTBLK_ID_DIRECT_LAST)
+ {
+ /* Direct blocks occupy the first table entries, indexed by ID. */
+ ui32TableIdx = uiBlockID;
+ }
+ /* Is it an indirect block */
+ else if ((uiBlockID > RGX_CNTBLK_ID_DIRECT_LAST) && (uiBlockID < RGX_CNTBLK_ID_LAST))
+ {
+ /* Indirect blocks follow the direct entries: one table slot per
+ * group number extracted from the ID. */
+ ui32TableIdx = RGX_CNTBLK_ID_DIRECT_LAST + (((uiBlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT) - 1U);
+ }
+ /* Unknown mapping from CNTBLK_ID to Table index */
+ else
+ {
+ return NULL;
+ }
+
+ /* NOTE(review): PVR_ASSERT compiles out in release builds; the explicit
+ * bounds test in the condition further down is the effective guard. */
+ PVR_ASSERT(ui32TableIdx < ui32CntBlkModelLen);
+
+ /* psDevInfo is dereferenced by pfnIsBlkPresent below, so validate it first. */
+ if (psDevInfo == NULL)
+ {
+ PVR_LOG(("psDevInfo invalid"));
+ return NULL;
+ }
+
+ if ((ui32TableIdx < ui32CntBlkModelLen) &&
+ (asCntBlkTypeModel[ui32TableIdx].pfnIsBlkPresent(&asCntBlkTypeModel[ui32TableIdx], psDevInfo, &sRtInfo) != IMG_FALSE))
+ {
+ return &asCntBlkTypeModel[ui32TableIdx];
+ }
+
+ /* Fall through, block not valid from run-time validation */
+ return NULL;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVRGXGetConfiguredHWPerfMuxCountersKM
+@Description Bridge wrapper: maps the FW HWPerf control structure into
+ CPU-addressable memory, reads back the configured mux
+ counters for the requested block, then releases the mapping
+ on all paths.
+@Input psConnection Bridge connection (unused).
+@Input psDeviceNode Device node; must not be NULL.
+@Input ui32BlockID Counter block identifier to query.
+@Output psConfiguredMuxCounters Receives the block's configuration.
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters)
+{
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ eError = RGXAcquireHWPerfCtlCPUAddr(psDeviceNode, &psHWPerfCtl);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXAcquireHWPerfCtlCPUAddr");
+
+ /* Delegate the actual readback; errors are logged but the CPU mapping is
+ * always released before returning. */
+ eError = PVRSRVRGXGetConfiguredHWPerfMuxCounters(psDeviceNode,
+ psHWPerfCtl,
+ ui32BlockID,
+ psConfiguredMuxCounters);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXGetConfiguredHWPerfMuxCounters");
+
+ RGXReleaseHWPerfCtlCPUAddr(psDeviceNode);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVRGXGetConfiguredHWPerfMuxCounters
+@Description Reads back the configured mux counter selection for one
+ counter block from the FW HWPerf control structure, decoding
+ each selected counter's mode, group-select and bit-select
+ fields from its 64-bit config word.
+@Input psDevNode Device node; must not be NULL.
+@Input psHWPerfCtl FW HWPerf control structure; must not be NULL.
+@Input ui32BlockID Counter block identifier to query.
+@Output psConfiguredMuxCounters Receives the decoded configuration.
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psConfiguredMuxCounters != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+ PVR_RETURN_IF_FALSE(psDevInfo != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ if ((ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK) < RGX_CNTBLK_ID_LAST)
+ {
+ RGXFWIF_HWPERF_CTL_BLK *psBlock = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfCtl);
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psBlkTypeDesc;
+ IMG_UINT32 i, ui32LastCountIdx = 0, ui8CurCountIdx = 0;
+ /* Fix: zero-initialise. The whole struct is copied to the caller on
+ * success, so any array slots beyond the selected counters would
+ * otherwise hand back uninitialised kernel stack bytes. */
+ RGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfig = {0};
+
+ /* NOTE(review): power lock appears to guard readback of the FW-shared
+ * control block — confirm against the configuration path. */
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ if (psBlock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ if (!psBlock->ui32Enabled || !psBlock->ui32Valid)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not %s",
+ ui32BlockID,
+ !psBlock->ui32Enabled ? "enabled." : "configured."));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ psBlkTypeDesc = GetHWPerfBlockTypeByID(psDevInfo, psBlock->eBlockID);
+ if (psBlkTypeDesc == NULL)
+ {
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ sBlockConfig.ui16BlockID = psBlock->eBlockID;
+ sBlockConfig.ui8Mode = 0;
+
+ /* Walk the set bits of the counter mask; each set bit i has a 64-bit
+ * config word. Stop once the block type's counter count is reached. */
+ for (i = 0; ((psBlock->uiCounterMask >> i) != 0) &&
+ (ui8CurCountIdx < psBlkTypeDesc->ui8NumCounters); i++)
+ {
+ if (psBlock->uiCounterMask & (1 << i))
+ {
+ IMG_UINT8 ui8Mode = 0;
+
+ /* One mode bit per selected counter, packed from bit 0 upward. */
+ ui8Mode = (psBlock->aui64CounterCfg[i] >> psBlkTypeDesc->ui8SelectRegModeShift) & 1U;
+ sBlockConfig.ui8Mode |= ui8Mode << ui32LastCountIdx;
+
+ /* Decode the 5-bit group select and 15-bit bit select fields. */
+ sBlockConfig.aui8GroupSelect[ui32LastCountIdx] =
+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) & 0x1F;
+
+ sBlockConfig.aui16BitSelect[ui32LastCountIdx] =
+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT) & 0x7FFF;
+
+#if defined(RGX_FEATURE_PERF_COUNTER_BATCH)
+ sBlockConfig.aui32BatchMax[ui32LastCountIdx] =
+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT) & 0x1FFF;
+
+ sBlockConfig.aui32BatchMin[ui32LastCountIdx] =
+ (psBlock->aui64CounterCfg[i] >> RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT) & 0x1FFF;
+#endif
+ ui32LastCountIdx++;
+ ui8CurCountIdx++;
+ }
+ }
+
+ /* Mask with one bit set per counter reported back. */
+ sBlockConfig.ui8CounterSelect = (1 << ui32LastCountIdx) - 1;
+ *psConfiguredMuxCounters = sBlockConfig;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError);
+ }
+
+Error:
+ PVRSRVPowerUnlock(psDevNode);
+
+InvalidIDError:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVRGXGetConfiguredHWPerfCounters
+@Description Reads back the configured counter list for one custom counter
+ block (or, with HWPERF_UNIFIED, a directly-addressable block)
+ from the FW HWPerf control structure.
+@Input psDevNode Device node; must not be NULL.
+@Input psHWPerfCtl FW HWPerf control structure; must not be NULL.
+@Input ui32BlockID Counter block identifier to query.
+@Output psConfiguredCounters Receives the configuration on success.
+@Return PVRSRV_ERROR PVRSRV_OK on success.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters)
+{
+ /* Fix: zero-initialise. sBlockConfig is copied wholesale to the caller on
+ * success, so ui16Counters entries beyond the configured count would
+ * otherwise hand back uninitialised kernel stack bytes. */
+ RGX_HWPERF_CONFIG_CNTBLK sBlockConfig = {0};
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psConfiguredCounters != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ if ((ui32BlockID & RGX_CNTBLK_ID_CUSTOM_MASK) >= RGX_CNTBLK_ID_LAST)
+ {
+ /* Validate block ID */
+ switch (ui32BlockID)
+ {
+ case RGX_CNTBLK_ID_CUSTOM0:
+ case RGX_CNTBLK_ID_CUSTOM1:
+ case RGX_CNTBLK_ID_CUSTOM2:
+ case RGX_CNTBLK_ID_CUSTOM3:
+ case RGX_CNTBLK_ID_CUSTOM4_FW:
+ {
+ /* Power lock is taken only on paths that read the FW-shared data;
+ * it is dropped at the Error label below. */
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ /* Check to see if this block is enabled */
+ if (psHWPerfCtl->ui32SelectedCountersBlockMask & (1 << (ui32BlockID & 0x0F)))
+ {
+ RGXFW_HWPERF_SELECT *psBlock = &psHWPerfCtl->SelCntr[ui32BlockID & 0x0F];
+
+ sBlockConfig.ui16BlockID = ui32BlockID;
+ sBlockConfig.ui16NumCounters = psBlock->ui32NumSelectedCounters;
+
+ for (i = 0; i < psBlock->ui32NumSelectedCounters; i++)
+ {
+ sBlockConfig.ui16Counters[i] = psBlock->aui32SelectedCountersIDs[i];
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not enabled.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+ break;
+ }
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError);
+ }
+ }
+ }
+#if defined(HWPERF_UNIFIED)
+ else if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK)
+ {
+ RGXFWIF_HWPERF_DA_BLK *psBlock = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfCtl);
+
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ if (psBlock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ if (!psBlock->uiEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not enabled.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ sBlockConfig.ui16BlockID = psBlock->eBlockID;
+ sBlockConfig.ui16NumCounters = psBlock->uiNumCounters;
+
+ for (i = 0; i < psBlock->uiNumCounters; i++)
+ {
+ sBlockConfig.ui16Counters[i] = psBlock->aui32Counters[i];
+ }
+ }
+#endif
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError);
+ }
+
+Error:
+ PVRSRVPowerUnlock(psDevNode);
+
+InvalidIDError:
+ /* Only copy out on success; error paths leave the caller's struct untouched. */
+ if (eError == PVRSRV_OK)
+ {
+ *psConfiguredCounters = sBlockConfig;
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVRGXGetEnabledHWPerfBlocks
+@Description Counts every enabled HWPerf counter block (mux, custom and,
+ with HWPERF_UNIFIED, directly-addressable blocks) and
+ optionally writes their IDs into pui32EnabledBlockIDs.
+@Input psDevNode Device node; must not be NULL.
+@Input psHWPerfCtl FW HWPerf control structure; must not be NULL.
+@Input ui32ArrayLen Capacity of pui32EnabledBlockIDs; may be 0.
+@Output pui32BlockCount Total number of enabled blocks found.
+@Output pui32EnabledBlockIDs Optional ID array (may be NULL: count only).
+@Return PVRSRV_ERROR PVRSRV_OK on success,
+ PVRSRV_ERROR_OUT_OF_MEMORY if the array is too small.
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs)
+{
+ IMG_UINT32 ui32LastIdx = 0;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(pui32BlockCount != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ *pui32BlockCount = 0;
+
+ if (ui32ArrayLen > 0 && pui32EnabledBlockIDs == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "ui32ArrayLen is greater than 0 but pui32EnabledBlockIDs is NULL"));
+ }
+
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ for (i = 0; i < RGX_HWPERF_MAX_MUX_BLKS; i++)
+ {
+ if (psHWPerfCtl->sBlkCfg[i].ui32Enabled && psHWPerfCtl->sBlkCfg[i].ui32Valid)
+ {
+ *pui32BlockCount += 1;
+
+ if (pui32EnabledBlockIDs == NULL)
+ {
+ continue;
+ }
+
+ if (ui32LastIdx + 1 > ui32ArrayLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks."));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+
+ pui32EnabledBlockIDs[ui32LastIdx] = psHWPerfCtl->sBlkCfg[i].eBlockID;
+ ui32LastIdx++;
+ }
+ }
+
+ for (i = 0; i < RGX_HWPERF_MAX_CUSTOM_BLKS; i++)
+ {
+ /* No custom blocks selected at all: skip the whole loop early. */
+ if (psHWPerfCtl->ui32SelectedCountersBlockMask == 0)
+ {
+ break;
+ }
+
+ if (psHWPerfCtl->ui32SelectedCountersBlockMask & (1 << i))
+ {
+ *pui32BlockCount += 1;
+
+ if (pui32EnabledBlockIDs == NULL)
+ {
+ continue;
+ }
+
+ if (ui32LastIdx + 1 > ui32ArrayLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks."));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+
+ pui32EnabledBlockIDs[ui32LastIdx] = RGX_CNTBLK_ID_CUSTOM0 + i;
+ ui32LastIdx++;
+ }
+ }
+
+#if defined(HWPERF_UNIFIED)
+ for (i = 0; i < RGX_HWPERF_MAX_DA_BLKS; i++)
+ {
+ if (psHWPerfCtl->sDABlkCfg[i].uiEnabled)
+ {
+ *pui32BlockCount += 1;
+
+ if (pui32EnabledBlockIDs == NULL)
+ {
+ continue;
+ }
+
+ /* Fix: was (ui32LastIdx > ui32ArrayLen), an off-by-one that allowed
+ * a write at pui32EnabledBlockIDs[ui32ArrayLen] — one element past
+ * the end. Now matches the bound used by the two loops above. */
+ if (ui32LastIdx + 1 > ui32ArrayLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks."));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+
+ pui32EnabledBlockIDs[ui32LastIdx] = psHWPerfCtl->sDABlkCfg[i].eBlockID;
+ ui32LastIdx++;
+ }
+ }
+#endif
+
+Error:
+ PVRSRVPowerUnlock(psDevNode);
+ return eError;
+}
+
/******************************************************************************
End of file (rgxhwperf.c)
******************************************************************************/
#ifndef RGXHWPERF_H_
#define RGXHWPERF_H_
+#include "rgx_fwif_hwperf.h"
#include "rgxhwperf_common.h"
/******************************************************************************
IMG_UINT32 ui32ArrayLen,
RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCountersKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters);
+
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfMuxCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_MUX_CNTBLK *psConfiguredMuxCounters);
+
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters);
+
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32ArrayLength,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs);
+
#endif /* RGXHWPERF_H_ */
#include "rgxpower.h"
#include "tlstream.h"
#include "pvrsrv_tlstreams.h"
+#include "pvr_ricommon.h"
#include "rgxinit.h"
#include "rgxbvnc.h"
static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
-#if (RGX_NUM_OS_SUPPORTED > 1)
-static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid);
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID);
static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap);
-#endif
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
/* Services internal heap identification used in this file only */
#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */
IMG_UINT32 ui32IrqCnt;
#if defined(RGX_FW_IRQ_OS_COUNTERS)
- if PVRSRV_VZ_MODE_IS(GUEST)
+ if (PVRSRV_VZ_MODE_IS(GUEST))
{
bReturnVal = IMG_TRUE;
}
else
{
- get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_OS, psDevInfo);
+ get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_DRIVER_ID, psDevInfo);
if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0])
{
RGXFWIF_GPU_UTIL_STATS *psReturnStats)
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ const volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+ IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM];
+ IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_DRIVERS_SUPPORTED];
IMG_UINT64 ui64TimeNow;
IMG_UINT32 ui32Attempts;
IMG_UINT32 ui32Remainder;
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32MaxDMCount;
+ RGXFWIF_DM eDM;
/***** (1) Initialise return stats *****/
psReturnStats->ui64GpuStatBlocked = 0;
psReturnStats->ui64GpuStatCumulative = 0;
+ memset(psReturnStats->aaui64DMOSStatIdle, 0, sizeof(psReturnStats->aaui64DMOSStatIdle));
+ memset(psReturnStats->aaui64DMOSStatActive, 0, sizeof(psReturnStats->aaui64DMOSStatActive));
+ memset(psReturnStats->aaui64DMOSStatBlocked, 0, sizeof(psReturnStats->aaui64DMOSStatBlocked));
+ memset(psReturnStats->aaui64DMOSStatCumulative, 0, sizeof(psReturnStats->aaui64DMOSStatCumulative));
+
if (hGpuUtilUser == NULL)
{
return PVRSRV_ERROR_INVALID_PARAMS;
}
psAggregateStats = hGpuUtilUser;
+ ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount;
+
+ /* Allocate temporary counters used in the attempts loop */
+ paaui64DMOSTmpCounters = OSAllocMem(sizeof(*paaui64DMOSTmpCounters) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paaui64DMOSTmpCounters != NULL, "OSAllocMem:1", failTmpCountersAlloc);
+ paui64DMOSTmpLastWord = OSAllocMem(sizeof(*paui64DMOSTmpLastWord) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastWord != NULL, "OSAllocMem:2", failTmpLastWordAlloc);
+ paui64DMOSTmpLastState = OSAllocMem(sizeof(*paui64DMOSTmpLastState) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastState != NULL, "OSAllocMem:3", failTmpLastStateAlloc);
+ paui64DMOSTmpLastPeriod = OSAllocMem(sizeof(*paui64DMOSTmpLastPeriod) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastPeriod != NULL, "OSAllocMem:4", failTmpLastPeriodAlloc);
+ paui64DMOSTmpLastTime = OSAllocMem(sizeof(*paui64DMOSTmpLastTime) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastTime != NULL, "OSAllocMem:5", failTmpLastTimeAlloc);
/* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
{
- IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
- IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+ const volatile IMG_UINT64 *pui64GpuStatsCounters = &psUtilFWCb->aui64GpuStatsCounters[0];
+ const volatile IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS] = &psUtilFWCb->aaui64DMOSLastWord[0];
+ const volatile IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM] = &psUtilFWCb->aaaui64DMOSStatsCounters[0];
+
+ IMG_UINT64 aui64GpuTmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+ IMG_UINT64 ui64GpuLastPeriod = 0, ui64GpuLastWord = 0, ui64GpuLastState = 0, ui64GpuLastTime = 0;
IMG_UINT32 i = 0;
* First attempt at detecting if the FW is in the middle of an update.
* This should also help if the FW is in the middle of a 64 bit variable update.
*/
- while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
- (aui64TmpCounters[ui64LastState] !=
- psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+ while (((ui64GpuLastWord != psUtilFWCb->ui64GpuLastWord) ||
+ (aui64GpuTmpCounters[ui64GpuLastState] !=
+ pui64GpuStatsCounters[ui64GpuLastState])) &&
(i < MAX_ITERATIONS))
{
- ui64LastWord = psUtilFWCb->ui64LastWord;
- ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
- aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
- aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE];
- aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+ ui64GpuLastWord = psUtilFWCb->ui64GpuLastWord;
+ ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord);
+ aui64GpuTmpCounters[GPU_IDLE] = pui64GpuStatsCounters[GPU_IDLE];
+ aui64GpuTmpCounters[GPU_ACTIVE] = pui64GpuStatsCounters[GPU_ACTIVE];
+ aui64GpuTmpCounters[GPU_BLOCKED] = pui64GpuStatsCounters[GPU_BLOCKED];
+
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ paui64DMOSTmpLastWord[eDM][ui32DriverID] = paui64DMOSLastWord[eDM][ui32DriverID];
+ paui64DMOSTmpLastState[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32DriverID]);
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_IDLE];
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_ACTIVE];
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_BLOCKED];
+ }
+ }
+
i++;
}
{
PVR_DPF((PVR_DBG_WARNING,
"RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+
+ OSFreeMem(paaui64DMOSTmpCounters);
+ OSFreeMem(paui64DMOSTmpLastWord);
+ OSFreeMem(paui64DMOSTmpLastState);
+ OSFreeMem(paui64DMOSTmpLastPeriod);
+ OSFreeMem(paui64DMOSTmpLastTime);
+
return PVRSRV_ERROR_TIMEOUT;
}
/* Update temp counters to account for the time since the last update to the shared ones */
OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */
ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode));
- ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
- ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
- aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+ ui64GpuLastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWord);
+ ui64GpuLastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64GpuLastTime);
+ aui64GpuTmpCounters[ui64GpuLastState] += ui64GpuLastPeriod;
/* Get statistics for a user since its last request */
- psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_IDLE],
psAggregateStats->ui64GpuStatIdle);
- psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE],
+ psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_ACTIVE],
psAggregateStats->ui64GpuStatActive);
- psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_BLOCKED],
psAggregateStats->ui64GpuStatBlocked);
psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle +
psReturnStats->ui64GpuStatActive +
psReturnStats->ui64GpuStatBlocked;
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ paui64DMOSTmpLastTime[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32DriverID]);
+ paui64DMOSTmpLastPeriod[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, paui64DMOSTmpLastTime[eDM][ui32DriverID]);
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][paui64DMOSTmpLastState[eDM][ui32DriverID]] += paui64DMOSTmpLastPeriod[eDM][ui32DriverID];
+
+ /* Get statistics for a user since its last request */
+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE],
+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE],
+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED],
+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] +
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] +
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID];
+ }
+ }
+
if (psAggregateStats->ui64TimeStamp != 0)
{
IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
break;
}
-
/***** (4) Update aggregate stats for the current user *****/
psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle;
psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
psAggregateStats->ui64TimeStamp = ui64TimeNow;
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID];
+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID];
+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID];
+ }
+ }
/***** (5) Convert return stats to microseconds *****/
psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID], 1000, &ui32Remainder);
+ }
+ }
+
+ OSFreeMem(paui64DMOSTmpLastTime);
+failTmpLastTimeAlloc:
+ OSFreeMem(paui64DMOSTmpLastPeriod);
+failTmpLastPeriodAlloc:
+ OSFreeMem(paui64DMOSTmpLastState);
+failTmpLastStateAlloc:
+ OSFreeMem(paui64DMOSTmpLastWord);
+failTmpLastWordAlloc:
+ OSFreeMem(paaui64DMOSTmpCounters);
+
+failTmpCountersAlloc:
/* Check that the return stats make sense */
if (psReturnStats->ui64GpuStatCumulative == 0)
{
- /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
- * returned 0. This could happen if the GPU frequency value
+ /* We can enter here only if allocating the temporary stats
+ * buffers failed, or all the RGXFWIF_GPU_UTIL_GET_PERIOD
+ * returned 0. The latter could happen if the GPU frequency value
* is not well calibrated and the FW is updating the GPU state
* while the Host is reading it.
* When such an event happens frequently, timers or the aggregate
/* NoStats used since this may be called outside of the register/de-register
* process calls which track memory use. */
- psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+ psAggregateStats = OSAllocZMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
if (psAggregateStats == NULL)
{
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
- psAggregateStats->ui64GpuStatIdle = 0;
- psAggregateStats->ui64GpuStatActive = 0;
- psAggregateStats->ui64GpuStatBlocked = 0;
- psAggregateStats->ui64TimeStamp = 0;
-
- /* Not used */
- psAggregateStats->bValid = IMG_FALSE;
- psAggregateStats->ui64GpuStatCumulative = 0;
-
*phGpuUtilUser = psAggregateStats;
return PVRSRV_OK;
RGXTimeCorrRestartPeriodic(psDeviceNode);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Process Workload Estimation Specific commands from the FW */
- WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Process Workload Estimation Specific commands from the FW */
+ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+ }
#endif
if (psDevInfo->pvAPMISRData == NULL)
* that is not used for mapping.
* To program the Device's BIF with the correct PC address, use the base
* address of the carveout reserved for MMU mappings as Kernel MMU PC Address */
-#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
- sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
-#else
- PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
- eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG;
- PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)");
-
- sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr +
- (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED);
-#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */
+ IMG_DEV_PHYADDR sDevPAddr;
+ PHYS_HEAP *psFwPageTableHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
+
+ PVR_LOG_RETURN_IF_FALSE((NULL != psFwPageTableHeap),
+ "Firmware Page Table heap not defined.",
+ PVRSRV_ERROR_INVALID_HEAP);
+
+ PhysHeapGetDevPAddr(psFwPageTableHeap, &sDevPAddr);
+ sKernelMMUCtxPCAddr.uiAddr = sDevPAddr.uiAddr;
}
else
{
return IMG_FALSE;
}
+/*
+ RGXGetTFBCLossyGroup
+
+ Returns the TFBC (Texture Frame Buffer Compression) lossy compression
+ control group to program for this device.
+
+ On cores with the TFBC_LOSSY_37_PERCENT feature the value defaults to
+ the PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP build-time setting; on
+ SUPPORT_VALIDATION builds it may additionally be overridden at load
+ time via the TFBCCompressionControlGroup AppHint. Cores without the
+ feature always use group 0.
+*/
+static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT))
+ {
+ /* Build-time default; used unchanged unless the AppHint below overrides it */
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP;
+ IMG_UINT32 ui32TFBCCompressionControlGroup = ui32AppHintDefault;
+
+#if defined(SUPPORT_VALIDATION)
+ /* Validation builds allow the group to be tuned without rebuilding */
+ void *pvAppHintState = NULL;
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TFBCCompressionControlGroup,
+ &ui32AppHintDefault, &ui32TFBCCompressionControlGroup);
+ OSFreeKMAppHintState(pvAppHintState);
+#endif
+
+ return ui32TFBCCompressionControlGroup;
+ }
+
+ /* Feature not present: only group 0 is valid */
+ return 0;
+}
+
+
/*
RGXDevMMUAttributes
*/
static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_BOOL bKernelMemoryCtx)
+ IMG_BOOL bKernelFWMemoryCtx)
{
MMU_DEVICEATTRIBS *psMMUDevAttrs;
if ((psDeviceNode->pfnCheckDeviceFeature) &&
PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
{
- psMMUDevAttrs = bKernelMemoryCtx ?
+ psMMUDevAttrs = bKernelFWMemoryCtx ?
psDeviceNode->psFirmwareMMUDevAttrs :
psDeviceNode->psMMUDevAttrs;
}
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Initialise work estimation lock */
- eError = OSLockCreate(&psDevInfo->hWorkEstLock);
- PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Initialise work estimation lock */
+ eError = OSLockCreate(&psDevInfo->hWorkEstLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+ }
#endif
/* Initialise lists of ZSBuffers */
IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
(eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
+ /* The AutoVz driver enables a virtualisation watchdog not compatible with APM */
if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE)))
{
- PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__));
+ PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in AutoVz mode", __func__));
bEnableAPM = IMG_FALSE;
}
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
- /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */
PVR_ASSERT(bEnableAPM == IMG_FALSE);
#endif
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit);
}
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, "SharedSecMem", &psDevInfo->psGenHeapSecMem);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSAllocateSecBuf", ErrorExit);
+#endif
+
psDevInfo->bDevInit2Done = IMG_TRUE;
return PVRSRV_OK;
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
-#if defined(SUPPORT_AUTOVZ)
+#if defined(RGX_PREMAP_FW_HEAPS)
PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
- if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (!psDeviceNode->bAutoVzFwIsUp))
{
+ PHYS_HEAP *psFwPageTableHeap =
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
+
+ PVR_LOG_GOTO_IF_INVALID_PARAM((psFwPageTableHeap != NULL),
+ eError, failed_to_create_ctx);
+
/* Temporarily swap the MMU and default GPU physheap to allow the page
* tables of all memory mapped by the FwKernel context to be placed
* in a dedicated memory carveout. This should allow the firmware mappings to
* persist after a Host kernel crash or driver reset. */
-
- psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+ psDeviceNode->psMMUPhysHeap = psFwPageTableHeap;
}
#endif
psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
/* Create the memory context for the firmware. */
- eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_FORFW,
&psDevInfo->psKernelDevmemCtx);
if (eError != PVRSRV_OK)
{
goto failed_to_find_heap;
}
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if (defined(RGX_PREMAP_FW_HEAPS)) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1))
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSID;
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ IMG_UINT32 ui32DriverID;
+
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH];
- OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+ OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
- &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+ &psDevInfo->psPremappedFwRawHeap[ui32DriverID]);
PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap);
}
}
#endif
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
IMG_DEV_PHYADDR sPhysHeapBase;
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
- eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase);
+ eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM], &sPhysHeapBase);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap);
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
+ IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
eError = RGXFwRawHeapAllocMap(psDeviceNode,
- ui32OSID,
+ ui32DriverID,
sRawFwHeapBase,
RGX_FIRMWARE_RAW_HEAP_SIZE);
if (eError != PVRSRV_OK)
{
- for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--)
+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--)
{
- RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID);
}
PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap);
}
}
-#if defined(SUPPORT_AUTOVZ)
+#if defined(RGX_PREMAP_FW_HEAPS)
/* restore default Px setup */
psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
#endif
}
-#else
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ /* On setups with dynamically mapped Guest heaps, the Guest makes
+ * a PVZ call to the Host to request the mapping during init. */
if (PVRSRV_VZ_MODE_IS(GUEST))
{
eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig);
PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap);
}
-#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+#endif /* !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
if (PVRSRV_VZ_MODE_IS(GUEST))
{
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
+#if defined(RGX_PREMAP_FW_HEAPS)
+ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+#endif
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
-#if defined(SUPPORT_AUTOVZ)
- PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
-
- psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+#if defined(RGX_PREMAP_FW_HEAPS)
+ psDeviceNode->psMMUPhysHeap =
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
if (!psDeviceNode->bAutoVzFwIsUp)
#endif
{
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID);
}
}
-#if defined(SUPPORT_AUTOVZ)
- psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
-#endif
}
-#else
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
if (PVRSRV_VZ_MODE_IS(GUEST))
{
(void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig);
eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
PVR_ASSERT(eError == PVRSRV_OK);
}
+
+#if defined(RGX_PREMAP_FW_HEAPS)
+ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
}
static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
{
ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
- /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/
- ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+ /*Mask non-critical options out as we do support combining them in UM & KM */
+ ui32BuildOptionsMismatch &= FW_OPTIONS_STRICT;
#endif
if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
{
ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
- (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED)))
{
PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
- __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
+ __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_DRIVERS_SUPPORTED), ui8FwOsCount));
}
#endif /* defined(NO_HARDWARE) */
"TrampolineRegion",
&pasTrampoline[i]->hPdumpPages,
#endif
+ PVR_SYS_ALLOC_PID,
&pasTrampoline[i]->sPages,
&pasTrampoline[i]->sPhysAddr);
if (PVRSRV_OK != eError)
PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
#if defined(DEBUG)
void *pvAppHintState = NULL;
- IMG_UINT32 ui32AppHintDefault;
+ IMG_BOOL bAppHintDefault;
IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE;
#endif
#if defined(DEBUG)
OSCreateKMAppHintState(&pvAppHintState);
- ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+ bAppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
OSGetKMAppHintBOOL(psDeviceNode,
pvAppHintState,
EnableFWPoisonOnFree,
- &ui32AppHintDefault,
+ &bAppHintDefault,
&bEnableFWPoisonOnFree);
OSFreeKMAppHintState(pvAppHintState);
psDevInfo->bDevInit2Done = IMG_FALSE;
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ if (psDevInfo->psGenHeapSecMem != NULL)
+ {
+ OSFreeSecBuf(psDevInfo->psGenHeapSecMem);
+ }
+#endif
+
#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK)
if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY))
#endif
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* De-init work estimation lock */
- if (psDevInfo->hWorkEstLock != NULL)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- OSLockDestroy(psDevInfo->hWorkEstLock);
+ /* De-init work estimation lock */
+ if (psDevInfo->hWorkEstLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hWorkEstLock);
+ }
}
#endif
PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
DEVICE_MEMORY_INFO *psDevMemoryInfo;
- IMG_UINT32 ui32Temp=0;
if (!psDevInfo)
{
DeviceDepBridgeDeInit(psDevInfo);
-#if defined(PDUMP)
- DevmemIntFreeDefBackingPage(psDeviceNode,
- &psDeviceNode->sDummyPage,
- DUMMY_PAGE);
- DevmemIntFreeDefBackingPage(psDeviceNode,
- &psDeviceNode->sDevZeroPage,
- DEV_ZERO_PAGE);
-#endif
-
-#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
- if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
- {
- OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
- PVR_UNREFERENCED_PARAMETER(ui32Temp);
- }
- else
-#else
- {
- /*Delete the Dummy page related info */
- ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
- if (0 != ui32Temp)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Dummy page reference counter is non zero (%u)",
- __func__,
- ui32Temp));
- PVR_ASSERT(0);
- }
- }
-#endif
-
- /*Delete the Dummy page related info */
- ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter);
- if (0 != ui32Temp)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Zero page reference counter is non zero (%u)",
- __func__,
- ui32Temp));
- }
-
-#if defined(PDUMP)
- if (NULL != psDeviceNode->sDummyPage.hPdumpPg)
- {
- PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active");
- }
-
- if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg)
- {
- PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active");
- }
-#endif
-
- /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */
- OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
-
- /* Destroy the zero page lock */
- OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
-
#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
OSLockDestroy(psDevInfo->hCounterDumpingLock);
#endif
IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength;
IMG_UINT32 ui32Log2ImportAlignment;
PFN_IS_PRESENT pfnIsHeapPresent;
+ PFN_HEAP_INIT pfnInit;
+ PFN_HEAP_DEINIT pfnDeInit;
IMG_UINT32 ui32HeapInstanceFlags;
};
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+/* Private data struct for general heap.
+ * Holds the devmem reservation and PMR mapping created by GeneralHeapInit
+ * for the per-device secure memory carveout, so GeneralHeapDeInit can
+ * undo them in reverse order. */
+typedef struct RGX_GENERAL_HEAP_DATA_TAG
+{
+ DEVMEMINT_RESERVATION *psSecMemReservation; /* carveout range reserved in the heap */
+ DEVMEMINT_MAPPING *psSecMemMapping; /* psGenHeapSecMem PMR mapped into that range */
+} RGX_GENERAL_HEAP_DATA;
+
+/* Init callback function for general heap.
+ *
+ * Reserves the KM-reserved carveout region of the general devmem heap
+ * (at RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET from the heap base) and
+ * maps the per-device secure memory PMR (psDevInfo->psGenHeapSecMem,
+ * allocated via OSAllocateSecBuf at init) into it, GPU read/write.
+ *
+ * psDeviceNode - device owning the heap; source of psGenHeapSecMem
+ * psDevmemHeap - the general heap instance being initialised
+ * phPrivData - out: heap private data handle (RGX_GENERAL_HEAP_DATA),
+ * later passed to GeneralHeapDeInit
+ *
+ * Returns PVRSRV_OK, or an error after unwinding any partial setup
+ * (goto-based cleanup releases the reservation and the heap data).
+ */
+static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_HANDLE *phPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGX_GENERAL_HEAP_DATA *psHeapData;
+ IMG_DEV_VIRTADDR sCarveOutAddr;
+ PVRSRV_ERROR eError;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap, "psDevmemHeap");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(phPrivData, "phPrivData");
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ psHeapData = OSAllocMem(sizeof(*psHeapData));
+ PVR_LOG_RETURN_IF_NOMEM(psHeapData, "psHeapData");
+
+ /* Map the per device secure mem PMR allocation to the general devmem heap carveout. */
+ sCarveOutAddr = DevmemIntHeapGetBaseAddr(psDevmemHeap);
+ sCarveOutAddr.uiAddr += RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET;
+
+ /* Reserve the carveout virtual range before mapping into it */
+ eError = DevmemIntReserveRange(psDevmemHeap,
+ sCarveOutAddr,
+ RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES,
+ &psHeapData->psSecMemReservation);
+ PVR_GOTO_IF_ERROR(eError, ErrorFreeHeapData);
+
+ /* GPU read/write mapping of the secure buffer into the reserved range */
+ eError = DevmemIntMapPMR(psDevmemHeap, psHeapData->psSecMemReservation, psDevInfo->psGenHeapSecMem,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ &psHeapData->psSecMemMapping);
+ PVR_GOTO_IF_ERROR(eError, ErrorUnreserve);
+
+ *phPrivData = (IMG_HANDLE)psHeapData;
+
+ return PVRSRV_OK;
+
+ErrorUnreserve:
+ DevmemIntUnreserveRange(psHeapData->psSecMemReservation);
+ErrorFreeHeapData:
+ OSFreeMem(psHeapData);
+
+ return eError;
+}
+
+
+/* Deinit callback function for general heap.
+ *
+ * Tears down what GeneralHeapInit set up, in reverse order: unmaps the
+ * secure memory PMR, releases the carveout reservation, then frees the
+ * heap private data. hPrivData is the RGX_GENERAL_HEAP_DATA handle
+ * produced by GeneralHeapInit and must be non-NULL. */
+static void GeneralHeapDeInit(IMG_HANDLE hPrivData)
+{
+ RGX_GENERAL_HEAP_DATA *psHeapData = (RGX_GENERAL_HEAP_DATA*)hPrivData;
+
+ PVR_ASSERT(hPrivData);
+
+ /* Reverse order of GeneralHeapInit: unmap before unreserving */
+ DevmemIntUnmapPMR(psHeapData->psSecMemMapping);
+ DevmemIntUnreserveRange(psHeapData->psSecMemReservation);
+
+ OSFreeMem(psHeapData);
+}
+#else
+/* Callbacks not used */
+#define GeneralHeapInit NULL
+#define GeneralHeapDeInit NULL
+#endif
+
/* Feature Present function prototypes */
static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo)
static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] =
{
- /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent HeapInstanceFlags */
- {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE },
- {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE },
- {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE },
- {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG },
- {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG },
- {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE },
- {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE },
- {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, HEAP_INST_BRN_DEP_VALUE },
- {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE },
- {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE },
- {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE },
- {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE },
- {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE },
- {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, SignalSnoopingIsPresent, HEAP_INST_FEAT_DEP_VALUE},
- {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, HEAP_INST_FEAT_DEP_VALUE},
- {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, HEAP_INST_FEAT_DEP_VALUE},
- {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE },
- {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE },
- {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, HEAP_INST_FEAT_DEP_VALUE},
- {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE },
- {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE },
- {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_DEP_VALUE },
- {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_DEP_VALUE }
+ /* New layout adds two columns (pfnInit/pfnDeInit); only the general heap
+  * row below installs callbacks (GeneralHeapInit/GeneralHeapDeInit) — all
+  * other rows pass NULL. Reserved-region sizes now come from the
+  * RGX_HEAP_*_RESERVED_TOTAL_SIZE defines instead of raw granularity math. */
+ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent pfnInit pfnDeInit HeapInstanceFlags */
+ {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE },
+ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE },
+ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG },
+ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG },
+ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE },
+ {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE },
+ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE },
+ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE },
+ {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, SignalSnoopingIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE},
+ {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE},
+ {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE},
+ {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, NULL, NULL, HEAP_INST_FEAT_DEP_VALUE},
+ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE },
+ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE },
+ {RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE },
+ {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, NULL, NULL, HEAP_INST_BRN_DEP_VALUE }
};
static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] =
{
- /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent HeapInstanceFlags*/
- {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE},
- {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE},
- {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, HEAP_INST_BRN_ALT_VALUE},
- {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, HEAP_INST_DEFAULT_VALUE},
+ /* Firmware heap table: gains the same pfnInit/pfnDeInit columns as the app
+  * table above; no firmware heap installs callbacks (all NULL). */
+ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnInit pfnDeInit HeapInstanceFlags*/
+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, NULL, NULL, HEAP_INST_BRN_ALT_VALUE},
+ {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
};
/* Generic counting method. */
}
/* Generic heap instantiator */
static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
- const RGX_HEAP_INFO pksHeapInfo[],
- IMG_UINT32 ui32HeapListSize,
- DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor)
+ const RGX_HEAP_INFO pksHeapInfo[],
+ IMG_UINT32 ui32HeapListSize,
+ const IMG_UINT32 ui32Log2RgxDefaultPageShift,
+ DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor)
{
IMG_UINT32 i;
/* We now have a list of the heaps to include and so we should loop over this
*/
for (i = 0; i < ui32HeapListSize; i++)
{
- IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
IMG_UINT32 ui32Log2DataPageSize = 0;
-
const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i];
if (psHeapInfo->pfnIsHeapPresent)
if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)
{
- ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize;
+ ui32Log2DataPageSize = psDevInfo->psDeviceNode->ui32RGXLog2Non4KPgSize;
}
else
{
}
HeapCfgBlueprintInit(psHeapInfo->pszName,
- psHeapInfo->ui64HeapBase,
- psHeapInfo->uiHeapLength,
- psHeapInfo->uiHeapReservedRegionLength,
- ui32Log2DataPageSize,
- psHeapInfo->ui32Log2ImportAlignment,
- *psDeviceMemoryHeapCursor);
+ psHeapInfo->ui64HeapBase,
+ psHeapInfo->uiHeapLength,
+ psHeapInfo->uiHeapReservedRegionLength,
+ ui32Log2DataPageSize,
+ psHeapInfo->ui32Log2ImportAlignment,
+ psHeapInfo->pfnInit,
+ psHeapInfo->pfnDeInit,
+ *psDeviceMemoryHeapCursor);
(*psDeviceMemoryHeapCursor)++;
}
PVRSRV_ERROR eError;
DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
- IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
+ IMG_UINT32 ui32AppHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW);
IMG_UINT32 ui32CountedHeapSize;
- IMG_UINT32 ui32HeapCount = 0;
+ IMG_UINT32 ui32AppHeapCount = 0;
IMG_UINT32 ui32FWHeapCount = 0;
+ IMG_UINT32 ui32Log2DefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+
+ if (ui32Log2DefaultPageShift == 0)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+#if defined(FIX_HW_BRN_71317_BIT_MASK)
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317))
+ {
+ if (ui32Log2DefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT
+ || ui32Log2DefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OS page size too large for device virtual heaps. "
+ "Maximum page size supported is 256KB when BRN71317 is present."));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+#endif
+
/* Count heaps required for the app heaps */
_CountRequiredHeaps(psDevInfo,
gasRGXHeapLayoutApp,
- ui32HeapListSize,
- &ui32HeapCount);
+ ui32AppHeapListSize,
+ &ui32AppHeapCount);
/* Count heaps required for the FW heaps */
_CountRequiredHeaps(psDevInfo,
ui32FWHeapListSize,
&ui32FWHeapCount);
- ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED);
+ ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_DRIVERS_SUPPORTED);
psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize);
PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0);
/* Instantiate App Heaps */
_InstantiateRequiredHeaps(psDevInfo,
- gasRGXHeapLayoutApp,
- ui32HeapListSize,
- &psDeviceMemoryHeapCursor);
+ gasRGXHeapLayoutApp,
+ ui32AppHeapListSize,
+ ui32Log2DefaultPageShift,
+ &psDeviceMemoryHeapCursor);
/* Instantiate FW Heaps */
_InstantiateRequiredHeaps(psDevInfo,
- gasRGXHeapLayoutFW,
- ui32FWHeapListSize,
- &psDeviceMemoryHeapCursor);
+ gasRGXHeapLayoutFW,
+ ui32FWHeapListSize,
+ ui32Log2DefaultPageShift,
+ &psDeviceMemoryHeapCursor);
/* set the heap count */
psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
/* Check we have allocated the correct # of heaps, minus any VZ heaps as these
* have not been created at this point
*/
- PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED));
+ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_DRIVERS_SUPPORTED));
/*
In the new heap setup, we initialise 2 configurations:
psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS;
-#if (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
/* Create additional raw firmware heaps */
- for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK)
+ eError = RGXInitFwRawHeap(psDevInfo, psDeviceMemoryHeapCursor, ui32DriverID);
+ if (eError != PVRSRV_OK)
{
/* if any allocation fails, free previously allocated heaps and abandon initialisation */
- for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--)
+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--)
{
RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
psDeviceMemoryHeapCursor--;
}
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
goto e1;
}
psDeviceMemoryHeapCursor++;
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
return PVRSRV_OK;
e1:
+/* Tears down the device memory heap blueprints. On non-guest drivers (when
+ * premapped FW heaps or multiple drivers are configured) the per-driver raw
+ * firmware heaps created by RGXInitFwRawHeap are de-initialised first, then
+ * the heap config array and blueprint array are freed. */
static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
{
-#if (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
/* Delete all guest firmware heaps */
- for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
psDeviceMemoryHeapCursor++;
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
}
-static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+static PVRSRV_ERROR RGXInitSharedFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
{
PVRSRV_ERROR eError = PVRSRV_OK;
PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
- PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
+ PHYS_HEAP_CONFIG *psSysHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
/* VZ heap validation */
if (PVRSRV_VZ_MODE_IS(GUEST))
{
- PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL,
+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg != NULL,
"FW Main heap is required for VZ Guest.",
PVRSRV_ERROR_PHYSHEAP_CONFIG);
}
#endif
- if (psFwMainConfig != NULL)
+ if (psSysHeapCfg != NULL)
{
- /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided
+ /* Check FW_SHARED for multiple usage flags. Because FW_SHARED is divided
into subheaps, shared usage with other heaps is not allowed. */
- PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN,
- "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.",
+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_SHARED,
+ "FW_SHARED phys heap config not specified with more than one usage."
+ "FW_SHARED heap must be exclusively used as FW_SHARED.",
PVRSRV_ERROR_PHYSHEAP_CONFIG);
}
- if (psFwMainConfig == NULL)
+ if (psSysHeapCfg == NULL)
{
PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__));
+ /* Nothing to do. Default to the physheap fallback option */
}
- else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA)
+ else if (psSysHeapCfg->eType == PHYS_HEAP_TYPE_UMA)
{
PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__));
+
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psSysHeapCfg,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG] = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
}
else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */
{
- IMG_UINT64 uFwMainSubHeapSize;
- PHYS_HEAP_CONFIG sFwHeapConfig;
+ PHYS_HEAP_CONFIG sFwMainHeapCfg, sFwCfgHeapCfg;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__));
+
+
+ /* Subheap layout: Main + (optional MIPS reserved range) + Config */
+ sFwMainHeapCfg = *psSysHeapCfg;
+
+ /* reserve space for the Config heap */
+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_CONFIG_HEAP_SIZE;
/* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST)))
#if defined(FIX_HW_BRN_65101_BIT_MASK)
if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101))
{
- uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101;
+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101;
}
else
#endif
{
- uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL;
+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL;
}
}
+
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwMainHeapCfg.ui32UsageFlags),
+ &sFwMainHeapCfg,
+ "Fw Main subheap",
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit);
+
+ sFwCfgHeapCfg = *psSysHeapCfg;
+ sFwCfgHeapCfg.sStartAddr.uiAddr += psSysHeapCfg->uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+ sFwCfgHeapCfg.sCardBase.uiAddr += psSysHeapCfg->uiSize - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+
+ sFwCfgHeapCfg.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwCfgHeapCfg.ui32UsageFlags),
+ &sFwCfgHeapCfg,
+ "Fw Cfg subheap",
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit);
+ }
+
+ /* Acquire FW heaps */
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit);
+
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit);
+
+ return eError;
+
+ErrorDeinit:
+ PVR_ASSERT(IMG_FALSE);
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PHYS_HEAP_CONFIG *psFwCodeHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_CODE);
+ PHYS_HEAP_CONFIG *psFwDataHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PRIV_DATA);
+ PHYS_HEAP_CONFIG *psFwPrivateHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PRIVATE);
+ PHYS_HEAP_CONFIG sFwPrivateTempCfg;
+
+ if (psFwPrivateHeapCfg != NULL)
+ {
+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg == NULL) && (psFwDataHeapCfg == NULL),
+ "FW_PRIVATE and the FW_CODE & FW_PRIV_DATA usage flags "
+ "achieve the same goal and are mutually exclusive.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ /* Fw code and data are both allocated from this unified heap */
+ sFwPrivateTempCfg = *psFwPrivateHeapCfg;
+ sFwPrivateTempCfg.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+
+ psFwCodeHeapCfg = &sFwPrivateTempCfg;
+ psFwDataHeapCfg = &sFwPrivateTempCfg;
+ }
+
+ if ((psFwCodeHeapCfg == NULL) || (psFwDataHeapCfg == NULL))
+ {
+ if (psFwCodeHeapCfg != psFwDataHeapCfg)
+ {
+ /* Private Firmware code and data heaps must be either both defined
+ * or both undefined. There is no point in isolating one but not
+ * the other.*/
+ eError = PVRSRV_ERROR_PHYSHEAP_CONFIG;
+ PVR_LOG_GOTO_IF_ERROR(eError, "PrivateFwPhysHeap check", ErrorDeinit);
+ }
else
{
- uFwMainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE;
+ /* No dedicated heaps, default to the physheap fallback option */
}
+ }
+ else if (psFwCodeHeapCfg == psFwDataHeapCfg)
+ {
+ if (psFwCodeHeapCfg->ui32UsageFlags ==
+ (PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA))
+ {
+ /* Fw code and private data allocations come from the same system heap
+ * Instantiate one physheap and share it between them. */
- PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__));
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwCodeHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig");
+ }
+ else
+ {
+ /* Not an exclusive heap, can be used for other purposes (e.g. secure buffers).
+ * Expect the PVR layer to have already created a heap for the other uses. */
+ }
+ }
+ else
+ {
+ /*
+ * Separating private Firmware code and data is allowed for backwards compatibility
+ * purposes. New platforms should use the unified FW_PRIVATE heap instead.
+ *
+ * Early security implementations on Rogue cores required separate FW_PRIV_DATA
+ * and FW_CODE heaps, as access permissions to Firmware were granted differently
+ * based on the transaction types (code or data).
+ */
+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_CODE) &&
+ (psFwDataHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PRIV_DATA),
+ "Dedicated private heaps for Fw code and "
+ "data must have one usage flag exclusively.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
- PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE,
- "Invalid firmware physical heap size.", ErrorDeinit);
+ /* Dedicated Fw code heap */
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwCodeHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
- /* Now we construct RAs to manage the FW heaps */
+ /* Dedicated Fw private data heap */
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwDataHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+ }
-#if defined(SUPPORT_AUTOVZ)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE)
+ /* When premapping distinct private and shared Firmware phys heaps
+ * inside the same virtual devmem heap, their sizes must add up to
+ * the fixed RGX_FIRMWARE_RAW_HEAP_SIZE for the premapping to work */
{
- /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers:
- * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb;
- * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */
- IMG_UINT64 uMaxFwMmuPageTableSize = 1 * 1024 * 1024;
-
- sFwHeapConfig = *psFwMainConfig;
-
- /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap.
- * If a different base address is specified for this reserved range, use the overriding define instead. */
-#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
- sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
- sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
-#else
- sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
- sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
-#endif
+ PHYS_HEAP_CONFIG *psFwSharedHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
+ IMG_UINT64 ui64FwPrivateHeapSize;
- sFwHeapConfig.uiSize = uMaxFwMmuPageTableSize;
- sFwHeapConfig.ui32UsageFlags = 0;
+ PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL),
+ "Security support requires Fw code and data memory be"
+ " separate from the heap shared with the kernel driver.", ErrorDeinit);
+
+ if (psFwCodeHeapCfg != psFwDataHeapCfg)
+ {
+ /* Private Firmware allocations come from 2 different heaps */
+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize + psFwDataHeapCfg->uiSize;
+ }
+ else
+ {
+ /* Private Firmware allocations come from a single heap */
+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize;
+ }
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw MMU subheap",
- &psDeviceNode->psFwMMUReservedPhysHeap);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit);
+ PVR_LOG_GOTO_IF_FALSE((psFwSharedHeapCfg->uiSize +
+ ui64FwPrivateHeapSize) ==
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ "Invalid firmware physical heap size.", ErrorDeinit);
}
#endif
- /* Subheap layout: Main + (optional MIPS reserved range) + Config */
- sFwHeapConfig = *psFwMainConfig;
- sFwHeapConfig.uiSize = uFwMainSubHeapSize;
- sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN;
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit);
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit);
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit);
- sFwHeapConfig = *psFwMainConfig;
- sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG;
+ return eError;
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit);
- }
+ErrorDeinit:
+ PVR_ASSERT(IMG_FALSE);
- /* Acquire FW heaps */
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit);
+ return eError;
+}
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit);
+/* Creates and acquires the dedicated phys heap used for the Firmware's
+ * premapped page tables (PHYS_HEAP_USAGE_FW_PREMAP_PT). Only runs on
+ * non-guest drivers when RGX_PREMAP_FW_HEAPS is defined; otherwise it is a
+ * no-op returning PVRSRV_OK. The heap config is validated before creation:
+ * it must exist, be exclusive to this usage, be LMA or DMA backed, and be
+ * large enough for RGX_FIRMWARE_MAX_PAGETABLE_SIZE. */
+static PVRSRV_ERROR RGXInitFwPageTableHeap(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit);
+#if defined(RGX_PREMAP_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ PHYS_HEAP_CONFIG *psFwPageTableHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PREMAP_PT);
+
+ /* Validate the page-table heap config before creating the heap. */
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg != NULL),
+ "The Firmware Page Table phys heap config not found.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PREMAP_PT),
+ "The Firmware Page Table heap must be used exclusively for this purpose",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_LMA) ||
+ (psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_DMA),
+ "The Firmware Page Table heap must be LMA or DMA memory.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->uiSize >= RGX_FIRMWARE_MAX_PAGETABLE_SIZE),
+ "The Firmware Page Table heap must be able to hold the maximum "
+ "number of pagetables needed to cover the Firmware's VA space.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwPageTableHeapCfg,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig:FwPageTableHeap");
+
+ eError = PhysHeapAcquire(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire:FwPageTableHeap");
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif /* defined(RGX_PREMAP_FW_HEAPS) */
return eError;
+}
+
+/* Top-level Firmware phys heap initialisation, split into three stages:
+ * the premapped FW page-table heap, the shared (kernel-visible) FW heaps,
+ * then the private (code/data) FW heaps. Stops at the first failure and
+ * returns that error.
+ * NOTE(review): the replaced code called PVRSRVPhysMemHeapsDeinit() on the
+ * error path; the new code only returns the error — confirm the caller now
+ * performs that cleanup. */
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = RGXInitFwPageTableHeap(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitFwPageTableHeap", ErrorDeinit);
+ eError = RGXInitSharedFwPhysHeaps(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSharedFwPhysHeaps", ErrorDeinit);
+ eError = RGXInitPrivateFwPhysHeaps(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitPrivateFwPhysHeaps", ErrorDeinit);
ErrorDeinit:
- PVR_ASSERT(IMG_FALSE);
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDeviceFWMainHeapMemCheck
+@Description Checks the free memory in FW Main PhysHeap of a device to ensure
+ there is enough for a connection to be made.
+
+@Input psDeviceNode The device of the FW Main PhysHeap to be checked.
+
+@Return On success PVRSRV_OK, else a PVRSRV_ERROR code.
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDeviceFWMainHeapMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PHYS_HEAP *psFWMainPhysHeap;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+
+ psFWMainPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+ if (psFWMainPhysHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to get device's FW Main PhysHeap"));
+ return PVRSRV_ERROR_INVALID_HEAP;
+ }
+
+ /* The free-space check is only performed for LMA-type heaps; other heap
+  * types fall through and return PVRSRV_OK. Minimum threshold comes from
+  * RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION (KB). */
+ if (PhysHeapGetType(psFWMainPhysHeap) == PHYS_HEAP_TYPE_LMA)
+ {
+ const IMG_UINT32 ui32MinMemInKBs = RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION;
+ IMG_UINT64 ui64FreePhysHeapMem;
+
+ eError = PhysHeapFreeMemCheck(psFWMainPhysHeap,
+ KB2B(ui32MinMemInKBs),
+ &ui64FreePhysHeapMem);
+
+ /* Log a diagnostic when the heap cannot satisfy the minimum; the
+  * error code itself is propagated to the caller unchanged. */
+ if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FW_MAIN PhysHeap contains less than the "
+ "minimum free space required to acquire a connection. "
+ "Free space: %"IMG_UINT64_FMTSPEC"KB "
+ "Minimum required: %uKB",
+ B2KB(ui64FreePhysHeapMem),
+ ui32MinMemInKBs));
+ }
+ }
return eError;
}
-static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize)
+/* Reads the GeneralNon4KHeapPageSize apphint and returns its log2 value via
+ * pui32Log2Non4KPgSize. Signature change: now takes psDevInfo (needed for the
+ * BRN71317 check) and returns a PVRSRV_ERROR instead of void. Fails with
+ * PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE when the requested size is 1MB/2MB
+ * on BRN71317-affected hardware, or is smaller than the OS page size. */
+static PVRSRV_ERROR _ReadNon4KHeapPageSize(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 *pui32Log2Non4KPgSize)
{
void *pvAppHintState = NULL;
IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE;
IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+ IMG_UINT32 uiLog2OSPageSize = OSGetPageShift();
/* Get the page size for the dummy page from the NON4K heap apphint */
OSCreateKMAppHintState(&pvAppHintState);
OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
- GeneralNon4KHeapPageSize, &ui32AppHintDefault,
- &ui32GeneralNon4KHeapPageSize);
+ GeneralNon4KHeapPageSize, &ui32AppHintDefault,
+ &ui32GeneralNon4KHeapPageSize);
*pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
OSFreeKMAppHintState(pvAppHintState);
+#if defined(FIX_HW_BRN_71317_BIT_MASK)
+ /* BRN71317 workaround: reject 1MB/2MB Non4K page sizes on affected cores. */
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317))
+ {
+ if (*pui32Log2Non4KPgSize == RGX_HEAP_2MB_PAGE_SHIFT
+ || *pui32Log2Non4KPgSize == RGX_HEAP_1MB_PAGE_SHIFT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Page sizes of 2MB or 1MB cause page faults."));
+ return PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE;
+ }
+ }
+#endif
+
+ /* Check the Non4k page size is at least the size of the OS page size
+ * or larger. The Non4k page size also has to be a multiple of the OS page
+ * size but since we have the log2 value from the apphint we know powers of 2
+ * will always be multiples.
+ */
+ PVR_LOG_RETURN_IF_FALSE(*pui32Log2Non4KPgSize >= uiLog2OSPageSize,
+ "Non4K page size smaller than OS page size",
+ PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE);
+
+ return PVRSRV_OK;
}
/* RGXRegisterDevice
*
- * NOTE: No PDUMP statements are allowed in until Part 2 of the device initialisation
- * is reached.
+ * WARNING!
+ *
+ * No PDUMP statements are allowed until device initialisation starts.
*/
PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
{
/* Configure MMU specific stuff */
RGXMMUInit_Register(psDeviceNode);
- psDeviceNode->pfnDevSLCFlushRange = NULL;
psDeviceNode->pfnInvalFBSCTable = NULL;
psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL;
/* Callback for getting the MMU device attributes */
psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes;
+ /* Callback for getting TFBC configuration */
+ psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup;
+
/* Register callback for initialising device-specific physical memory heaps */
psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit;
- /* Set up required support for dummy page */
- OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
- OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0);
-
- /* Set the order to 0 */
- psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0;
- psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0;
-
- /* Set the size of the Dummy page to zero */
- psDeviceNode->sDummyPage.ui32Log2PgSize = 0;
-
- /* Set the size of the Zero page to zero */
- psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0;
-
- /* Set the Dummy page phys addr */
- psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
-
- /* Set the Zero page phys addr */
- psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
-
- /* The lock can be acquired from MISR (Z-buffer) path */
- eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock);
- if (PVRSRV_OK != eError)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
- return eError;
- }
+ /* Register callback for checking a device's FW Main physical heap for sufficient free memory */
+ psDeviceNode->pfnCheckForSufficientFWPhysMem = RGXDeviceFWMainHeapMemCheck;
- /* Create the lock for zero page */
- eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock);
- if (PVRSRV_OK != eError)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__));
- goto free_dummy_page;
- }
-#if defined(PDUMP)
- psDeviceNode->sDummyPage.hPdumpPg = NULL;
- psDeviceNode->sDevZeroPage.hPdumpPg = NULL;
-#endif
+ /* Register callback for determining the appropriate LMA allocation policy for a phys heap */
+ psDeviceNode->pfnPhysHeapGetLMAPolicy = RGXPhysHeapGetLMAPolicy;
/*********************
* Device info setup *
goto e14;
}
- _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize);
-
- /*Set the zero & dummy page sizes as needed for the heap with largest page size */
- psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
- psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+ eError = _ReadNon4KHeapPageSize(psDevInfo,
+ &psDeviceNode->ui32RGXLog2Non4KPgSize);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_ReadNon4KHeapPageSize", e14);
eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo);
if (eError != PVRSRV_OK)
#if defined(SUPPORT_SOC_TIMER)
{
- IMG_BOOL ui32AppHintDefault = IMG_FALSE;
+ IMG_BOOL bAppHintDefault = IMG_FALSE;
IMG_BOOL bInitSocTimer;
void *pvAppHintState = NULL;
OSCreateKMAppHintState(&pvAppHintState);
- OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &ui32AppHintDefault, &bInitSocTimer);
+ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bAppHintDefault, &bInitSocTimer);
OSFreeKMAppHintState(pvAppHintState);
if (bInitSocTimer)
e0:
OSFreeMem(psDevInfo);
- /* Destroy the zero page lock created above */
- OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
-
-free_dummy_page:
- /* Destroy the dummy page lock created above */
- OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
-
PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
return PVRSRV_OK;
}
-#if (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
/*!
*******************************************************************************
@Description Called to perform additional initialisation
******************************************************************************/
-static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid)
+static PVRSRV_ERROR RGXInitFwRawHeap(PVRSRV_RGXDEV_INFO *psDevInfo, DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID)
{
IMG_UINT32 uiStringLength;
IMG_UINT32 uiStringLengthMax = 32;
IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift());
+ PVR_RETURN_IF_FALSE(ui32Log2RgxDefaultPageShift != 0, PVRSRV_ERROR_INVALID_PARAMS);
+
+#if defined(FIX_HW_BRN_71317_BIT_MASK)
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71317))
+ {
+ if (ui32Log2RgxDefaultPageShift == RGX_HEAP_2MB_PAGE_SHIFT
+ || ui32Log2RgxDefaultPageShift == RGX_HEAP_1MB_PAGE_SHIFT)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "OS page size too large for device virtual heaps. "
+ "Maximum page size supported is 256KB when BRN71317 is present."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+#endif
+
uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
- /* Start by allocating memory for this OSID heap identification string */
+ /* Start by allocating memory for this DriverID heap identification string */
psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
if (psDevMemHeap->pszName == NULL)
{
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
- /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
- OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid);
+ /* Append the DriverID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+ OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
/* Use the common blueprint template support function to initialise the heap */
HeapCfgBlueprintInit(psDevMemHeap->pszName,
- RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE),
+ RGX_FIRMWARE_RAW_HEAP_BASE + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE),
RGX_FIRMWARE_RAW_HEAP_SIZE,
0,
ui32Log2RgxDefaultPageShift,
0,
+ NULL,
+ NULL,
psDevMemHeap);
return PVRSRV_OK;
static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap)
{
IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
- IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_DRIVERS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
/* Safe to do as the guest firmware heaps are last in the list */
if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase &&
OSFreeMem(pszName);
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
/******************************************************************************
End of file (rgxinit.c)
#include "pmr.h"
#if defined(PDUMP)
-#include <stdarg.h>
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
#endif
void RGXMemCopy(const void *hPrivate,
sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K;
- sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
/*
*
sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
- sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000);
sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K;
- sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_DRIVERS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
* log2 page size (12, 14, 16 for a 4K, 16K, 64K page size).
*/
sRGXMMUDeviceAttributes.ui32BaseAlign =
- (CeilLog2(RGX_NUM_OS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U;
+ (CeilLog2(RGX_NUM_DRIVERS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U;
/* 256K alignment might be too hard to achieve, fall back to 64K */
sRGXMMUDeviceAttributes.ui32BaseAlign =
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
{
+ IMG_BOOL bPowerWasDown;
IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH));
IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU;
IMG_UINT32 ui32NumCores;
IMG_UINT32 i;
+ bPowerWasDown = (psDeviceNode->psDevConfig->pfnGpuDomainPower(psDeviceNode) == PVRSRV_SYS_POWER_STATE_OFF);
+
+ /* Power-up the device as required to read the registers */
+ if (bPowerWasDown)
+ {
+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON");
+ }
+
ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM);
#if !defined(NO_HARDWARE)
/* check that the number of cores reported is in-bounds */
ui32MulticoreGPUReg += ui32MulticoreRegBankOffset;
}
+ /* revert power state to what it was on entry to this function */
+ if (bPowerWasDown)
+ {
+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF");
+ }
+
/* Register callback to return info about multicore setup to client bridge */
psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo;
}
}
/*
- * Add a PDUMP POLL on the KZ signature check status.
+ * Add a PDUMP POLL on the WGP signature check status.
*/
- if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN)
+ if (psDevInfo->ui32ValidationFlags & RGX_VAL_WGP_SIG_CHECK_NOERR_EN)
{
- PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required");
+ PDUMPCOMMENT(psDeviceNode, "Verify WGP Signature: match required");
eError = PDUMPREGPOL(psDeviceNode,
RGX_PDUMPREG_NAME,
RGX_CR_SCRATCH11,
ui32PDumpFlags,
PDUMP_POLL_OPERATOR_EQUAL);
}
- else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN)
+ else if (psDevInfo->ui32ValidationFlags & RGX_VAL_WGP_SIG_CHECK_ERR_EN)
{
- PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required");
+ PDUMPCOMMENT(psDeviceNode, "Verify WGP Signature: mismatch required");
eError = PDUMPREGPOL(psDeviceNode,
RGX_PDUMPREG_NAME,
RGX_CR_SCRATCH11,
+/* Fold the elapsed time since the last sample into the GPU utilisation
+ * counters (whole-GPU, plus per-DM/per-driver), then restamp the "last word"
+ * records with the current time. Serialised by hGPUUtilLock. */
 static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
 {
 	RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
-	IMG_UINT64 *paui64StatsCounters;
+	IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS];
+	IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM];
 	IMG_UINT64 ui64LastPeriod;
 	IMG_UINT64 ui64LastState;
 	IMG_UINT64 ui64LastTime;
 	IMG_UINT64 ui64TimeNow;
+	RGXFWIF_DM eDM;
 	psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
-	paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+	paui64DMOSLastWord = &psUtilFWCb->aaui64DMOSLastWord[0];
+	paaui64DMOSStatsCounters = &psUtilFWCb->aaaui64DMOSStatsCounters[0];
 	OSLockAcquire(psDevInfo->hGPUUtilLock);
 	ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode));
 	/* Update counters to account for the time since the last update */
-	ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
-	ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+	ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord);
+	ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64GpuLastWord);
 	ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
-	paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+	psUtilFWCb->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod;
 	/* Update state and time of the latest update */
-	psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+	psUtilFWCb->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+	/* Repeat the same accounting for every DM of the device and every
+	 * supported driver: credit the elapsed period to the state recorded in
+	 * the per-DM/driver last word, then restamp it with the current time. */
+	for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++)
+	{
+		IMG_UINT32 ui32DriverID;
+
+		FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+		{
+			ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]);
+			ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]);
+			ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+			paaui64DMOSStatsCounters[eDM][ui32DriverID][ui64LastState] += ui64LastPeriod;
+
+			/* Update state and time of the latest update */
+			paui64DMOSLastWord[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+		}
+	}
 	OSLockRelease(psDevInfo->hGPUUtilLock);
 }
KM_SET_OS_CONNECTION(OFFLINE, psDevInfo);
#if defined(RGX_FW_IRQ_OS_COUNTERS)
- ui32idx = RGXFW_HOST_OS;
+ ui32idx = RGXFW_HOST_DRIVER_ID;
#else
for_each_irq_cnt(ui32idx)
#endif /* RGX_FW_IRQ_OS_COUNTERS */
LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
IMG_BOOL bGuestOnline = IMG_FALSE;
- for (ui32OSid = RGXFW_GUEST_OSID_START;
- ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START;
+ ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; ui32DriverID++)
{
RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE)
- psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState;
+ psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState;
if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) ||
(eGuestState == RGXFW_CONNECTION_FW_OFFLOADING))
{
bGuestOnline = IMG_TRUE;
- PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid));
+ PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32DriverID));
}
}
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
/* Guest drivers expect the firmware to have set its end of the
- * connection to Ready state by now. Poll indefinitely otherwise. */
+ * connection to Ready state by now. */
if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
{
PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__));
}
- while (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+
+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US)
{
- OSSleepms(10);
+ if (KM_FW_CONNECTION_IS(READY, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
+ break;
+ }
+ else
+ {
+ OSSleepms(10);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__));
+ return PVRSRV_ERROR_TIMEOUT;
}
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */
/* Guests can only access the register holding the connection states,
{
KM_SET_OS_CONNECTION(READY, psDevInfo);
+#if defined(SUPPORT_AUTOVZ)
/* Disable power callbacks that should not be run on virtualised drivers after the GPU
* is fully initialised: system layer pre/post functions and driver idle requests.
* The original device RGX Pre/Post functions are called from this Vz wrapper. */
&RGXVzPrePowerState, &RGXVzPostPowerState,
NULL, NULL, NULL, NULL);
-#if defined(SUPPORT_AUTOVZ)
/* During first-time boot the flag is set here, while subsequent reboots will already
* have set it earlier in RGXInit. Set to true from this point onwards in any case. */
psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
}
/* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */
- while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US)
{
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
- OSSleepms(100);
+ if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
+ break;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
+ OSSleepms(10);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Active state.", __func__));
+ return PVRSRV_ERROR_TIMEOUT;
}
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
/* poll on the Firmware supplying the compatibility data */
LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
#define EMU_CR_SYSTEM_IRQ_STATUS (0x00E0U)
/* IRQ is officially defined [8 .. 0] but here we split out the old deprecated single irq. */
#define EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE01))
+/* Volcanic TB uses [7 .. 0] but here we split out the old deprecated single irq. */
+#define EMU_CR_SYSTEM_IRQ_STATUS__VOLCANIC_TB__IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF01))
#define EMU_CR_SYSTEM_IRQ_STATUS_OLD_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
#endif
PDUMPIF(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags);
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
"Poll for TB irq status to be set (irqs signalled)...");
- PDUMPREGPOL(psDevInfo->psDeviceNode,
- RGX_TB_PDUMPREG_NAME,
- EMU_CR_SYSTEM_IRQ_STATUS,
- ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
- ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
- ui32PDumpFlags,
- PDUMP_POLL_OPERATOR_EQUAL);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, VOLCANIC_TB))
+ {
+ PDUMPREGPOL(psDevInfo->psDeviceNode,
+ RGX_TB_PDUMPREG_NAME,
+ EMU_CR_SYSTEM_IRQ_STATUS,
+ ~EMU_CR_SYSTEM_IRQ_STATUS__VOLCANIC_TB__IRQ_CLRMSK,
+ ~EMU_CR_SYSTEM_IRQ_STATUS__VOLCANIC_TB__IRQ_CLRMSK,
+ ui32PDumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+ }
+ else
+ {
+ PDUMPREGPOL(psDevInfo->psDeviceNode,
+ RGX_TB_PDUMPREG_NAME,
+ EMU_CR_SYSTEM_IRQ_STATUS,
+ ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
+ ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK,
+ ui32PDumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+ }
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
"... and then clear them");
- for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++)
+ FOREACH_HW_OSID(ui32OSid)
{
PDUMPREG32(psDevInfo->psDeviceNode,
RGX_PDUMPREG_NAME,
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
"Validate Interrupt lines.");
- for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++)
+ FOREACH_HW_OSID(ui32OSid)
{
PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000,
}
#endif /* defined(NO_HARDWARE) && defined(PDUMP) */
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
/*
* To validate the MTS unit we do the following:
* - Immediately after firmware loading for each OSID
ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS);
- if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS)
+ if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OSIDS)
{
PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:"));
- PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped));
+ PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OSIDS, ui32OsRegBanksMapped));
PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped));
}
ui32OSid,
ui32ScheduleRegister));
OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType);
- OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
#if defined(PDUMP)
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid);
PDUMP_FLAGS_CONTINUOUS);
#endif
+#if !defined(NO_HARDWARE)
+ OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
+
/* Wait test enable bit to be unset */
if (PVRSRVPollForValueKM(psDeviceNode,
- (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest,
+ (volatile IMG_UINT32 __iomem *)&psFwSysInit->ui32OSKickTest,
0,
RGXFWIF_KICK_TEST_ENABLED_BIT,
POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
}
PVR_DPF((PVR_DBG_MESSAGE, " PASS"));
+#endif
}
PVR_LOG(("MTS passed sideband tests"));
return PVRSRV_OK;
}
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION_MTS) */
#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
#define SCRATCH_VALUE (0x12345678U)
static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo)
{
void *pvAppHintState = NULL;
- IMG_UINT32 ui32AppHintDefault = 0;
+ const IMG_BOOL bDefaultFalse = IMG_FALSE;
IMG_BOOL bRunRiscvDmiTest;
IMG_UINT32 *pui32FWCode = NULL;
OSCreateKMAppHintState(&pvAppHintState);
OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest,
- &ui32AppHintDefault, &bRunRiscvDmiTest);
+ &bDefaultFalse, &bRunRiscvDmiTest);
OSFreeKMAppHintState(pvAppHintState);
if (bRunRiscvDmiTest == IMG_FALSE)
#endif
#endif
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo);
if (eError != PVRSRV_OK)
{
#endif
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
+ PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev,
+ psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
#endif
HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal);
if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
+ PVRSRVSetFirmwareHandshakeIdleTime(psDeviceNode->psPowerDev,
+ RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
#endif
PDUMPPOWCMDSTART(psDeviceNode);
/*
* Services AppHints initialisation
*/
-#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e)
+#define X(a, b, c, d, e, f) SrvInitParamInit ## b(a, d, e)
APPHINT_LIST_ALL
#undef X
#endif /* !defined(__linux__) */
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlGroup, psHints->ui32TFBCCompressionControlGroup);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlScheme, psHints->ui32TFBCCompressionControlScheme);
- SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlYUVFormat, psHints->bTFBCCompressionControlYUVFormat);
+ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlYUVFormat, psHints->bTFBCCompressionControlYUVFormat);
#endif
/*
IMG_UINT32 *pui32FWConfigFlagsExt,
IMG_UINT32 *pui32FwOsCfgFlags)
{
-#if defined(SUPPORT_VALIDATION)
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
-#endif
IMG_UINT32 ui32FWConfigFlags = 0;
IMG_UINT32 ui32FWConfigFlagsExt = 0;
((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0))
<< RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK;
}
+#else
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT))
+ {
+ ui32FWConfigFlagsExt |=
+ ((((PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) &
+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) |
+ ((PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) &
+ ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK))
+ << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK;
+ }
#endif
}
RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
{
RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+#if defined(HWPERF_UNIFIED)
RGXFWIF_HWPERF_DA_BLK *psHWPerfInitDABlkData;
+#endif
IMG_UINT32 ui32CntBlkModelLen;
const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
/* Get the block configure store to update from the global store of
* block configuration. This is used to remember the configuration
* between configurations and core power on in APM.
- * For RGX_FEATURE_HWPERF_OCEANIC layout we have a different
+ * For HWPERF_UNIFIED layout we will have a different
* structure type to decode the HWPerf block. This is indicated by
* the RGX_CNTBLK_ID_DA_MASK bit being set in the block-ID value. */
bDirect = (psBlkTypeDesc->ui32IndirectReg == 0U);
uiUnit = ui32BlockID - psBlkTypeDesc->ui32CntBlkIdBase;
+#if defined(HWPERF_UNIFIED)
if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK)
{
psHWPerfInitDABlkData = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfInitDataInt);
}
}
else
+#endif /* defined(HWPERF_UNIFIED) */
{
psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
/* Assert to check for HWPerf block mis-configuration */
PVR_ASSERT(psHWPerfInitBlkData);
- psHWPerfInitBlkData->bValid = IMG_TRUE;
+ psHWPerfInitBlkData->ui32Valid = IMG_TRUE;
PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
"bValid: This specifies if the layout block is valid for the given BVNC.");
DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
- (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
- psHWPerfInitBlkData->bValid,
+ (size_t)&(psHWPerfInitBlkData->ui32Valid) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->ui32Valid,
PDUMP_FLAGS_CONTINUOUS);
- psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+ psHWPerfInitBlkData->ui32Enabled = IMG_FALSE;
PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS,
"bEnabled: Set to 0x1 if the block needs to be enabled during playback.");
DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
- (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
- psHWPerfInitBlkData->bEnabled,
+ (size_t)&(psHWPerfInitBlkData->ui32Enabled) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->ui32Enabled,
PDUMP_FLAGS_CONTINUOUS);
psHWPerfInitBlkData->eBlockID = ui32BlockID;
{
void *pvParamState = NULL;
IMG_UINT32 ui32LogType;
- IMG_BOOL bAnyLogGroupConfigured;
IMG_UINT32 ui32BufferSize;
IMG_UINT32 ui32OpMode;
return;
SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE, pvParamState, EnableHTBLogGroup, ui32LogType);
- bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE, pvParamState, HTBOperationMode, ui32OpMode);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HTBufferSizeInKB, ui32BufferSize);
PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap];
PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap);
PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE
- | PHYS_HEAP_USAGE_GPU_SECURE);
+ | PHYS_HEAP_USAGE_GPU_SECURE | PHYS_HEAP_USAGE_FW_PRIVATE);
PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0,
PVRSRV_ERROR_NOT_SUPPORTED,
psDeviceNode->psDevConfig->pszName);
PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)",
psDeviceNode->sDevId.ui32InternalID,
- psDeviceNode->sDevId.i32OsDeviceID);
+ psDeviceNode->sDevId.i32KernelDeviceID);
if (psDeviceNode->psDevConfig->pszVersion)
{
RGXInitMultiCoreInfo(psDeviceNode);
-#if defined(PDUMP)
- eError = DevmemIntAllocDefBackingPage(psDeviceNode,
- &psDeviceNode->sDummyPage,
- PVR_DUMMY_PAGE_INIT_VALUE,
- DUMMY_PAGE,
- IMG_TRUE);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__));
- goto cleanup;
- }
-
- eError = DevmemIntAllocDefBackingPage(psDeviceNode,
- &psDeviceNode->sDevZeroPage,
- PVR_ZERO_PAGE_INIT_VALUE,
- DEV_ZERO_PAGE,
- IMG_TRUE);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__));
- goto cleanup;
- }
-#endif /* defined(PDUMP) */
-
sLayerParams.psDevInfo = psDevInfo;
#if defined(SUPPORT_TRUSTED_DEVICE)
eError = RGXValidateTDHeaps(psDeviceNode);
if ((sApphints.bEnableTrustedDeviceAceConfig) &&
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)))
{
- SetTrustedDeviceAceEnabled();
+ SetTrustedDeviceAceEnabled(psDeviceNode->psDevConfig->hSysData);
}
#endif
#endif
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
#endif
-#if defined(MIPS_FW_CODE_OSID)
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
- ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#if defined(MIPS_FW_CODE_OSID)
+ ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#else
+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
#endif
RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers");
}
#endif
-#if defined(MIPS_FW_CODE_OSID)
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
-#endif
+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
RGXCommentLog(hPrivate, "RGXStart: Write data remap registers");
RGXDataRemapConfig(hPrivate,
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
#endif
-#if defined(MIPS_FW_CODE_OSID)
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
- ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#if defined(MIPS_FW_CODE_OSID)
+ ui64RemapSettings |= ((IMG_UINT64) MIPS_FW_CODE_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
+#else
+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
#endif
RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers");
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
#endif
-#if defined(MIPS_FW_CODE_OSID)
ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK;
-#endif
+ ui64RemapSettings |= ((IMG_UINT64) FW_OSID) << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT;
RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers");
RGXTrampolineRemapConfig(hPrivate,
*/
ui32Reg = RGX_CR_SLC_CTRL_MISC;
ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
-
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
ui32RegVal |= RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
-#endif
#if defined(FIX_HW_BRN_60084_BIT_MASK)
if (RGX_DEVICE_HAS_BRN(hPrivate, 60084))
}
#endif
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
/* Bypass burst combiner if SLC line size is smaller than 1024 bits */
if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
{
ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
}
-#endif
RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
}
else
#endif /* defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) */
{
-#if defined(RGX_CR_MMU_CBASE_MAPPING) // FIXME_OCEANIC
+#if defined(RGX_CR_MMU_CBASE_MAPPING)
IMG_UINT32 uiPCAddr;
uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
<< RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
}
#endif
-#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_SLC_VIVT_BIT_MASK)
- if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
- {
- RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted");
- ui64RegVal |= IMG_UINT64_C(0xFC)
- << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
- }
-#endif
-
RGXCommentLog(hPrivate, "Init AXI-ACE interface");
RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
}
#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL)
#endif
+#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+ {
+ RGXCommentLog(hPrivate, "RGXStart: soft reset cpu core");
+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 0);
+ }
+#endif
+
#if defined(RGX_S7_SOFT_RESET_DUSTS)
if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
{
/* Take everything out of reset but the FW processor */
RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
-#if defined(RGX_FEATURE_XE_ARCHITECTURE) && (RGX_FEATURE_XE_ARCHITECTURE > 1)
- RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_CPU_EN);
-#else
RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
-#endif
(void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
}
/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper
* For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW
*/
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
{
#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
if (eError != PVRSRV_OK) return eError;
}
-#endif
if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
{
RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
& RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
-#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC) // FIXME_OCEANIC
+
+#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC)
RGXWriteReg32(hPrivate,
RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
}
#endif
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
/* Extra Idle checks */
eError = RGXPollReg32(hPrivate,
RGX_CR_BIF_STATUS_MMU,
0,
RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
if (eError != PVRSRV_OK) return eError;
-#endif
#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) &&
if (eError != PVRSRV_OK) return eError;
}
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
eError = RGXPollReg32(hPrivate,
RGX_CR_BIFPM_READS_EXT_STATUS,
0,
RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
if (eError != PVRSRV_OK) return eError;
-#endif
-
{
IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
eError = RGXPollReg64(hPrivate,
if (eError != PVRSRV_OK) return eError;
}
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
if (4 == RGXGetDeviceSLCBanks(hPrivate))
{
eError = RGXPollReg64(hPrivate,
RGX_CR_SLC_STATUS2_MASKFULL);
if (eError != PVRSRV_OK) return eError;
}
-#endif
if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
{
/* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper
* For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW
*/
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0))
{
#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
if (eError != PVRSRV_OK) return eError;
}
-#endif
#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
if (bMetaFW)
RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN);
if (eError != PVRSRV_OK) return eError;
}
-#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1)
else
{
eError = RGXPollReg32(hPrivate,
RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
if (eError != PVRSRV_OK) return eError;
}
-#endif
}
return eError;
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"
#include "rgxsyncutils.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "rgxdefs_km.h"
#include "rgx_fwif_km.h"
typedef struct {
DEVMEM_MEMDESC *psContextStateMemDesc;
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} RGX_SERVER_RC_TA_DATA;
typedef struct {
DEVMEM_MEMDESC *psContextStateMemDesc;
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} RGX_SERVER_RC_3D_DATA;
struct _RGX_SERVER_RENDER_CONTEXT_ {
eError = PhysmemNewRamBackedPMR(psFreeList->psConnection,
psFreeList->psDevInfo->psDeviceNode,
uiSize,
- uiSize,
1,
1,
&ui32MappingTable,
RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
- PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_PRIVATE),
sizeof(szAllocName),
szAllocName,
psFreeList->ownerPid,
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Update Stats */
- PVRSRVStatsUpdateFreelistStats(0,
+ PVRSRVStatsUpdateFreelistStats(psDevInfo->psDeviceNode,
+ 0,
1, /* Add 1 to the appropriate counter (Requests by FW) */
psFreeList->ui32InitFLPages,
psFreeList->ui32NumHighPages,
*/
PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
{
- PVRSRV_RGXDEV_INFO *psDevInfo;
PVRSRV_DEVICE_NODE *psDevNode;
PVRSRV_ERROR eError;
PRGXFWIF_HWRTDATA psHWRTData;
PVR_ASSERT(psKMHWRTDataSet);
psDevNode = psKMHWRTDataSet->psDeviceNode;
- psDevInfo = psDevNode->pvDevice;
eError = RGXSetFirmwareAddress(&psHWRTData,
psKMHWRTDataSet->psHWRTDataFwMemDesc, 0,
}
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Update Stats */
- PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ PVRSRVStatsUpdateFreelistStats(psDeviceNode,
+ 1, /* Add 1 to the appropriate counter (Requests by App)*/
0,
psFreeList->ui32InitFLPages,
psFreeList->ui32NumHighPages,
if (psZSBuffer->ui32RefCount == 0)
{
- if (psZSBuffer->bOnDemand)
- {
- IMG_HANDLE hDevmemHeap;
+ IMG_HANDLE hDevmemHeap;
- PVR_ASSERT(psZSBuffer->psMapping == NULL);
+ PVR_ASSERT(psZSBuffer->psMapping == NULL);
- /* Get Heap */
- eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
- PVR_ASSERT(psZSBuffer->psMapping == NULL);
- if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
- {
- OSLockRelease(hLockZSBuffer);
- return PVRSRV_ERROR_INVALID_HEAP;
- }
-
- eError = DevmemIntMapPMR(hDevmemHeap,
- psZSBuffer->psReservation,
- psZSBuffer->psPMR,
- psZSBuffer->uiMapFlags,
- &psZSBuffer->psMapping);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)",
- psZSBuffer,
- psZSBuffer->ui32ZSBufferID,
- PVRSRVGetErrorString(eError)));
- OSLockRelease(hLockZSBuffer);
- return eError;
+ /* Get Heap */
+ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+ PVR_ASSERT(psZSBuffer->psMapping == NULL);
+ if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
+ {
+ OSLockRelease(hLockZSBuffer);
+ return PVRSRV_ERROR_INVALID_HEAP;
+ }
- }
- PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+ eError = DevmemIntMapPMR(hDevmemHeap,
+ psZSBuffer->psReservation,
+ psZSBuffer->psPMR,
+ psZSBuffer->uiMapFlags,
+ &psZSBuffer->psMapping);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Unable populate ZS Buffer [%p, ID=0x%08x] (%s)",
psZSBuffer,
- psZSBuffer->ui32ZSBufferID));
+ psZSBuffer->ui32ZSBufferID,
+ PVRSRVGetErrorString(eError)));
+ OSLockRelease(hLockZSBuffer);
+ return eError;
+
}
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
}
/* Increase refcount*/
psZSBuffer->ui32NumReqByApp++;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+ PVRSRVStatsUpdateZSBufferStats(psZSBuffer->psDevInfo->psDeviceNode,
+ 1, 0, psZSBuffer->owner);
#endif
/* Do the backing */
psZSBuffer->ui32NumReqByFW++;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+ PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode,
+ 0, 1, psZSBuffer->owner);
#endif
}
DEVMEM_MEMDESC *psFWMemContextMemDesc,
IMG_DEV_VIRTADDR sVDMCallStackAddr,
IMG_UINT32 ui32CallStackDepth,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
ui32MaxDeadlineMS,
ui64RobustnessAddress,
psInfo,
PDUMP_FLAGS_CONTINUOUS);
#endif
- psTAData->ui32Priority = ui32Priority;
+ psTAData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_tacommoncontext:
DEVMEM_MEMDESC *psAllocatedMemDesc,
IMG_UINT32 ui32AllocatedOffset,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
ui32MaxDeadlineMS,
ui64RobustnessAddress,
psInfo,
sizeof(RGXFWIF_3DCTX_STATE),
PDUMP_FLAGS_CONTINUOUS);
- ps3DData->ui32Priority = ui32Priority;
+ ps3DData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_3dcommoncontext:
*/
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_DEV_VIRTADDR sVDMCallStackAddr,
IMG_UINT32 ui32CallStackDepth,
IMG_UINT32 ui32FrameworkRegisterSize,
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ }
#endif
if (ui32FrameworkRegisterSize)
psRenderContext->psFWRenderContextMemDesc,
offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
psFWMemContextMemDesc,
- ui32Priority,
+ i32Priority,
ui32Max3DDeadlineMS,
ui64RobustnessAddress,
&sInfo,
psFWMemContextMemDesc,
sVDMCallStackAddr,
ui32CallStackDepth,
- ui32Priority,
+ i32Priority,
ui32MaxTADeadlineMS,
ui64RobustnessAddress,
&sInfo,
/* Copy the static render context data */
OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
+#if defined(SUPPORT_TRP)
+ psFWRenderContext->eTRPGeomCoreAffinity = RGXFWIF_DM_MAX;
+#endif
DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
{
PVRSRV_ERROR eError;
PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
- IMG_UINT32 ui32WorkEstCCBSubmitted;
-#endif
/* remove node from list before calling destroy - as destroy, if successful
* will invalidate the node
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
- (void **)&psFWRenderContext);
- if (eError != PVRSRV_OK)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to map firmware render context (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto e0;
- }
+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map firmware render context (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto e0;
+ }
- ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
- DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
- /* Check if all of the workload estimation CCB commands for this workload are read */
- if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
- {
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
- PVR_DPF((PVR_DBG_WARNING,
- "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
- __func__, ui32WorkEstCCBSubmitted,
- psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+ __func__, ui32WorkEstCCBSubmitted,
+ psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
- eError = PVRSRV_ERROR_RETRY;
- goto e0;
+ eError = PVRSRV_ERROR_RETRY;
+ goto e0;
+ }
}
#endif
SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ }
#endif
OSLockDestroy(psRenderContext->hLock);
if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
{
- PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL;
- PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL;
-
CHKPT_DBG((PVR_DBG_ERROR,
"%s: [TA] iCheckFence = %d, iUpdateTimeline = %d",
__func__, iCheckTAFence, iUpdateTATimeline));
(void*)psTAFenceTimelineUpdateSync,
ui32TAFenceTimelineUpdateValue));
- /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
- pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
- CHKPT_DBG((PVR_DBG_ERROR,
- "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
- __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL;
+
+ /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+ pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR,
+ "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
+ __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+ }
+#endif
}
/* Append the sync prim update for the TA timeline (if required) */
(void*)ps3DFenceTimelineUpdateSync,
ui323DFenceTimelineUpdateValue));
- /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
- pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
- CHKPT_DBG((PVR_DBG_ERROR,
- "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
- __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL;
+
+ /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+ pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR,
+ "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
+ __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+ }
+#endif
}
/* Append the sync prim update for the 3D timeline (if required) */
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- if (bKickTA || bKick3D || bAbort)
+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (bKickTA || bKick3D || bAbort))
{
sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize;
sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls;
RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Prepare workload estimation */
- WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
- &psRenderContext->sWorkEstData,
- &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
- RGXFWIF_CCB_CMD_TYPE_GEOM,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickDataTA);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
+ RGXFWIF_CCB_CMD_TYPE_GEOM,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickDataTA);
+ }
#endif
/* Init the TA command helper */
pasTACmdHelperData);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* The following is used to determine the offset of the command header containing
- the workload estimation data so that can be accessed when the KCCB is read */
- ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* The following is used to determine the offset of the command header containing
+ the workload estimation data so that can be accessed when the KCCB is read */
+ ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+ }
#endif
eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData);
const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Prepare workload estimation */
- WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
- &psRenderContext->sWorkEstData,
- &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
- e3DCmdType,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickData3D);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
+ e3DCmdType,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickData3D);
+ }
#endif
/* Init the 3D command helper */
&pas3DCmdHelperData[ui323DCmdCount++]);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* The following are used to determine the offset of the command header containing the workload estimation
- data so that can be accessed when the KCCB is read */
- ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
- ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* The following are used to determine the offset of the command header containing the workload estimation
+ data so that can be accessed when the KCCB is read */
+ ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+ ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+ }
#endif
}
FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
-
- /* This checks if the command would wrap around at the end of the CCB and therefore would start at an
- offset of 0 rather than the current command offset */
- if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- ui32TACommandOffset = ui32TACmdOffset;
- }
- else
- {
- ui32TACommandOffset = 0;
+ ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and therefore would start at an
+ offset of 0 rather than the current command offset */
+ if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ {
+ ui32TACommandOffset = ui32TACmdOffset;
+ }
+ else
+ {
+ ui32TACommandOffset = 0;
+ }
}
#endif
}
FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
-
- if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
- {
- ui323DCommandOffset = ui323DCmdOffset;
- }
- else
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- ui323DCommandOffset = 0;
+ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+ {
+ ui323DCommandOffset = ui323DCmdOffset;
+ }
+ else
+ {
+ ui323DCommandOffset = 0;
+ }
}
#endif
}
/* Add the Workload data into the KCCB kick */
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
- sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
-#else
- sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+ sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+ }
#endif
eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl,
ui32TACmdOffset,
psGeomCmdShared->sCmn.ui32FrameNum,
ui32ExtJobRef,
- ui32IntJobRef
- );
+ ui32IntJobRef);
}
RGXSRV_HWPERF_ENQ(psRenderContext,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TA,
+ RGX_HWPERF_KICK_TYPE2_GEOM,
iCheckTAFence,
iUpdateTAFence,
iUpdateTATimeline,
if (eError2 != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
+ /* Mark the error and bail out */
+ eError = eError2;
goto fail_taacquirecmd;
}
- PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode,
ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TA3D);
+ RGX_HWPERF_KICK_TYPE2_GEOM);
}
if (ui323DCmdCount)
/* Add the Workload data into the KCCB kick */
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
- s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
-#else
- s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+ s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+ }
#endif
eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl,
ui323DCmdOffset,
ps3DCmdShared->sCmn.ui32FrameNum,
ui32ExtJobRef,
- ui32IntJobRef
- );
+ ui32IntJobRef);
}
RGXSRV_HWPERF_ENQ(psRenderContext,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_3D,
+ RGX_HWPERF_KICK_TYPE2_3D,
iCheck3DFence,
iUpdate3DFence,
iUpdate3DTimeline,
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
{
eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
- RGXFWIF_DM_3D,
- &s3DKCCBCmd,
- ui32PDumpFlags);
+ RGXFWIF_DM_3D,
+ &s3DKCCBCmd,
+ ui32PDumpFlags);
if (eError2 != PVRSRV_ERROR_RETRY)
{
break;
}
OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
} END_LOOP_UNTIL_TIMEOUT();
- }
- if (eError2 != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
- if (eError == PVRSRV_OK)
+ if (eError2 != PVRSRV_OK)
{
- eError = eError2;
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2));
+ if (eError == PVRSRV_OK)
+ {
+ eError = eError2;
+ }
+ goto fail_3dacquirecmd;
}
+
+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode,
+ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE2_3D);
}
/*
PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDeviceNode,
RGX_SERVER_RENDER_CONTEXT *psRenderContext,
- IMG_UINT32 ui32Priority)
+ IMG_INT32 i32Priority)
{
PVRSRV_ERROR eError;
OSLockAcquire(psRenderContext->hLock);
- if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+ if (psRenderContext->sTAData.i32Priority != i32Priority)
{
eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
psConnection,
psRenderContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_GEOM);
if (eError != PVRSRV_OK)
{
__func__, PVRSRVGetErrorString(eError)));
goto fail_tacontext;
}
- psRenderContext->sTAData.ui32Priority = ui32Priority;
+ psRenderContext->sTAData.i32Priority = i32Priority;
}
- if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+ if (psRenderContext->s3DData.i32Priority != i32Priority)
{
eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
psConnection,
psRenderContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_3D);
if (eError != PVRSRV_OK)
{
__func__, PVRSRVGetErrorString(eError)));
goto fail_3dcontext;
}
- psRenderContext->s3DData.ui32Priority = ui32Priority;
+ psRenderContext->s3DData.i32Priority = i32Priority;
}
OSLockRelease(psRenderContext->hLock);
#include "pvr_notifier.h"
typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
-typedef struct _RGX_FREELIST_ RGX_FREELIST;
typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
/*****************************************************************************
} RGX_HWRTDATA_COMMON_COOKIE;
-typedef struct _RGX_KM_HW_RT_DATASET_
-{
- RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie;
-
- PVRSRV_DEVICE_NODE *psDeviceNode;
- RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr;
-
- DEVMEM_MEMDESC *psHWRTDataFwMemDesc;
- DEVMEM_MEMDESC *psRTArrayFwMemDesc;
- DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc;
-
- RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
-#if !defined(SUPPORT_SHADOW_FREELISTS)
- DLLIST_NODE sNodeHWRTData;
-#endif
-
-} RGX_KM_HW_RT_DATASET;
-
-struct _RGX_FREELIST_ {
+typedef struct _RGX_FREELIST_ {
PVRSRV_RGXDEV_INFO *psDevInfo;
CONNECTION_DATA *psConnection;
/* FW data structures */
DEVMEM_MEMDESC *psFWFreelistMemDesc;
RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr;
-};
+}RGX_FREELIST;
+
+typedef struct _RGX_KM_HW_RT_DATASET_
+{
+ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie;
+
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr;
+
+ DEVMEM_MEMDESC *psHWRTDataFwMemDesc;
+ DEVMEM_MEMDESC *psRTArrayFwMemDesc;
+ DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc;
+
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
+#if !defined(SUPPORT_SHADOW_FREELISTS)
+ DLLIST_NODE sNodeHWRTData;
+#endif
+
+} RGX_KM_HW_RT_DATASET;
struct _RGX_PMR_NODE_ {
RGX_FREELIST *psFreeList;
IMG_UINT32 ui32RefCount;
IMG_BOOL bOnDemand;
- IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */
- IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */
+ IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */
+ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */
IMG_PID owner;
@Input psConnection -
@Input psDeviceNode - device node
- @Input ui32Priority - context priority
+ @Input i32Priority - context priority
@Input sVDMCallStackAddr - VDM call stack device virtual address
@Input ui32CallStackDepth - VDM call stack depth
@Input ui32FrameworkCommandSize - framework command size
******************************************************************************/
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_DEV_VIRTADDR sVDMCallStackAddr,
IMG_UINT32 ui32CallStackDepth,
IMG_UINT32 ui32FrameworkCommandSize,
PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDevNode,
RGX_SERVER_RENDER_CONTEXT *psRenderContext,
- IMG_UINT32 ui32Priority);
+ IMG_INT32 i32Priority);
PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
RGX_CONTEXT_PROPERTY eContextProperty,
#include "rgx_memallocflags.h"
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "rgxshader.h"
#include "pdump_km.h"
typedef struct {
DEVMEM_MEMDESC *psFWContextStateMemDesc;
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
#if defined(SUPPORT_BUFFER_SYNC)
struct pvr_buffer_sync_context *psBufferSyncContext;
#endif
typedef struct {
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
#if defined(SUPPORT_BUFFER_SYNC)
struct pvr_buffer_sync_context *psBufferSyncContext;
#endif
DEVMEM_MEMDESC *psAllocatedMemDesc,
IMG_UINT32 ui32AllocatedOffset,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGX_COMMON_CONTEXT_INFO *psInfo,
RGX_SERVER_TQ_3D_DATA *ps3DData,
IMG_UINT32 ui32CCBAllocSizeLog2,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
UINT_MAX, /* max deadline MS */
ui64RobustnessAddress,
psInfo,
PDUMPCOMMENT(psDeviceNode, "Dump 3D context suspend state buffer");
DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
- ps3DData->ui32Priority = ui32Priority;
+ ps3DData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_contextalloc:
static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGX_COMMON_CONTEXT_INFO *psInfo,
RGX_SERVER_TQ_2D_DATA *ps2DData,
IMG_UINT32 ui32CCBAllocSizeLog2,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
UINT_MAX, /* max deadline MS */
ui64RobustnessAddress,
psInfo,
goto fail_contextalloc;
}
- ps2DData->ui32Priority = ui32Priority;
+ ps2DData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_contextalloc:
*/
PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32FrameworkCommandSize,
IMG_PBYTE pabyFrameworkCommand,
IMG_HANDLE hMemCtxPrivData,
IMG_UINT32 ui32PackedCCBSizeU8888,
IMG_UINT32 ui32ContextFlags,
IMG_UINT64 ui64RobustnessAddress,
- RGX_SERVER_TQ_CONTEXT **ppsTransferContext,
- PMR **ppsCLIPMRMem,
- PMR **ppsUSCPMRMem)
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext)
{
RGX_SERVER_TQ_CONTEXT *psTransferContext;
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
psTransferContext->psFWTransferContextMemDesc,
offsetof(RGXFWIF_FWTRANSFERCONTEXT, sTQContext),
psFWMemContextMemDesc,
- ui32Priority,
+ i32Priority,
&sInfo,
&psTransferContext->s3DData,
U32toU8_Unpack3(ui32PackedCCBSizeU8888),
eError = _Create2DTransferContext(psConnection,
psDeviceNode,
psFWMemContextMemDesc,
- ui32Priority,
+ i32Priority,
&sInfo,
&psTransferContext->s2DData,
U32toU8_Unpack1(ui32PackedCCBSizeU8888),
}
#endif
- PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
-
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
if (bCheckFence && ui32FenceSyncCheckpointCount)
{
/* Append the checks (from input fence) */
- if (ui32FenceSyncCheckpointCount > 0)
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
+ SyncAddrListAppendCheckpoints(psSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
{
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
- SyncAddrListAppendCheckpoints(psSyncAddrListFence,
- ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
- if (!pauiIntFenceUFOAddress)
- {
- pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
- }
- ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
}
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+
#if defined(TRANSFER_CHECKPOINT_DEBUG)
if (ui32IntClientFenceCount > 0)
{
s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB);
s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB);
s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
HTBLOGK(HTB_SF_MAIN_KICK_3D,
s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TQ3D,
+ RGX_HWPERF_KICK_TYPE2_TQ3D,
iCheckFence,
i3DUpdateFence,
i3DUpdateTimeline,
if (eError2 != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
+ eError = eError2;
goto fail_cmdacquire;
}
PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
- ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D);
+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQ3D);
}
#if defined(RGX_FEATURE_TLA_BIT_MASK)
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TQ2D,
+ RGX_HWPERF_KICK_TYPE2_TQ2D,
iCheckFence,
i2DUpdateFence,
i2DUpdateTimeline,
if (eError2 != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
+ eError = eError2;
goto fail_cmdacquire;
}
PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef,
- ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D);
+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE2_TQ2D);
}
#endif
PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDevNode,
RGX_SERVER_TQ_CONTEXT *psTransferContext,
- IMG_UINT32 ui32Priority)
+ IMG_INT32 i32Priority)
{
PVRSRV_ERROR eError;
#if defined(RGX_FEATURE_TLA_BIT_MASK)
OSLockAcquire(psTransferContext->hLock);
#if defined(RGX_FEATURE_TLA_BIT_MASK)
- if ((psTransferContext->s2DData.ui32Priority != ui32Priority) &&
+ if ((psTransferContext->s2DData.i32Priority != i32Priority) &&
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
{
eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
psConnection,
psTransferContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_2D);
if (eError != PVRSRV_OK)
{
}
goto fail_2dcontext;
}
- psTransferContext->s2DData.ui32Priority = ui32Priority;
+ psTransferContext->s2DData.i32Priority = i32Priority;
}
#endif
- if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+ if (psTransferContext->s3DData.i32Priority != i32Priority)
{
eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
psConnection,
psTransferContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_3D);
if (eError != PVRSRV_OK)
{
}
goto fail_3dcontext;
}
- psTransferContext->s3DData.ui32Priority = ui32Priority;
+ psTransferContext->s3DData.i32Priority = i32Priority;
}
OSLockRelease(psTransferContext->hLock);
return ui32ContextBitMask;
}
+PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ PMR ** ppsCLIPMRMem,
+ PMR ** ppsUSCPMRMem)
+{
+ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psPMRMem)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMRMem);
+
+ return PVRSRV_OK;
+}
+
/**************************************************************************//**
End of file (rgxtransfer.c)
******************************************************************************/
******************************************************************************/
PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32FrameworkCommandSize,
IMG_PBYTE pabyFrameworkCommand,
IMG_HANDLE hMemCtxPrivData,
IMG_UINT32 ui32PackedCCBSizeU8888,
IMG_UINT32 ui32ContextFlags,
IMG_UINT64 ui64RobustnessAddress,
- RGX_SERVER_TQ_CONTEXT **ppsTransferContext,
- PMR **ppsCLIPMRMem,
- PMR **ppsUSCPMRMem);
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext);
/*!
PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDevNode,
RGX_SERVER_TQ_CONTEXT *psTransferContext,
- IMG_UINT32 ui32Priority);
+ IMG_INT32 i32Priority);
PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
RGX_CONTEXT_PROPERTY eContextProperty,
/* Debug/Watchdog - check if client transfer contexts are stalled */
IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR PVRSRVRGXTQGetSharedMemoryKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ PMR ** ppsCLIPMRMem,
+ PMR ** ppsUSCPMRMem);
+
+PVRSRV_ERROR PVRSRVRGXTQReleaseSharedMemoryKM(PMR * psUSCPMRMem);
+
#endif /* RGXTRANSFER_H */
+++ /dev/null
-/*************************************************************************/ /*!
-@File
-@Title RGX Compute routines
-@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-@Description RGX Compute routines
-@License Dual MIT/GPLv2
-
-The contents of this file are subject to the MIT license as set out below.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-Alternatively, the contents of this file may be used under the terms of
-the GNU General Public License Version 2 ("GPL") in which case the provisions
-of GPL are applicable instead of those above.
-
-If you wish to allow use of your version of this file only under the terms of
-GPL, and not to allow others to use your version of this file under the terms
-of the MIT license, indicate your decision by deleting the provisions above
-and replace them with the notice and other provisions required by GPL as set
-out in the file called "GPL-COPYING" included in this distribution. If you do
-not delete the provisions above, a recipient may use your version of this file
-under the terms of either the MIT license or GPL.
-
-This License is also included in this distribution in the file called
-"MIT-COPYING".
-
-EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/ /**************************************************************************/
-
-#include "img_defs.h"
-#include "srvkm.h"
-#include "pdump_km.h"
-#include "pvr_debug.h"
-#include "rgxutils.h"
-#include "rgxfwutils.h"
-#include "rgxcompute.h"
-#include "rgxmem.h"
-#include "allocmem.h"
-#include "devicemem.h"
-#include "devicemem_pdump.h"
-#include "osfunc.h"
-#include "rgxccb.h"
-#include "rgxhwperf.h"
-#include "ospvr_gputrace.h"
-#include "htbuffer.h"
-
-#include "sync_server.h"
-#include "sync_internal.h"
-#include "sync.h"
-#include "rgx_memallocflags.h"
-
-#if defined(SUPPORT_BUFFER_SYNC)
-#include "pvr_buffer_sync.h"
-#endif
-
-#include "sync_checkpoint.h"
-#include "sync_checkpoint_internal.h"
-
-#include "rgxtimerquery.h"
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
-#include "rgxworkest.h"
-
-#define HASH_CLEAN_LIMIT 6
-#endif
-
-/* Enable this to dump the compiled list of UFOs prior to kick call */
-#define ENABLE_CMP_UFO_DUMP 0
-
-//#define CMP_CHECKPOINT_DEBUG 1
-
-#if defined(CMP_CHECKPOINT_DEBUG)
-#define CHKPT_DBG(X) PVR_DPF(X)
-#else
-#define CHKPT_DBG(X)
-#endif
-
-typedef struct {
- DEVMEM_MEMDESC *psContextStateMemDesc;
- RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
-} RGX_SERVER_CC_CMP_DATA;
-
-struct _RGX_SERVER_COMPUTE_CONTEXT_ {
- PVRSRV_DEVICE_NODE *psDeviceNode;
- //RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- //DEVMEM_MEMDESC *psFWComputeContextStateMemDesc;
- DEVMEM_MEMDESC *psFWComputeContextMemDesc;
- DEVMEM_MEMDESC *psFWFrameworkMemDesc;
- RGX_SERVER_CC_CMP_DATA sComputeData;
- DLLIST_NODE sListNode;
- SYNC_ADDR_LIST sSyncAddrListFence;
- SYNC_ADDR_LIST sSyncAddrListUpdate;
- POS_LOCK hLock;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WORKEST_HOST_DATA sWorkEstData;
-#endif
-#if defined(SUPPORT_BUFFER_SYNC)
- struct pvr_buffer_sync_context *psBufferSyncContext;
-#endif
-};
-
-static
-PVRSRV_ERROR _CreateComputeContext(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- DEVMEM_MEMDESC *psAllocatedMemDesc,
- IMG_UINT32 ui32AllocatedOffset,
- SERVER_MMU_CONTEXT *psServerMMUContext,
- DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32PackedCCBSizeU88,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT32 ui32Priority,
- IMG_UINT64 ui64RobustnessAddress,
- IMG_UINT32 ui32MaxDeadlineMS,
- RGX_COMMON_CONTEXT_INFO *psInfo,
- RGX_SERVER_CC_CMP_DATA *psComputeData)
-{
- IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2;
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- PVRSRV_ERROR eError;
-
- /*
- Allocate device memory for the firmware GPU context suspend state.
- Note: the FW reads/writes the state to memory by accessing the GPU register interface.
- */
- PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state");
-
- eError = DevmemFwAllocate(psDevInfo,
- sizeof(RGXFWIF_COMPUTECTX_STATE),
- RGX_FWCOMCTX_ALLOCFLAGS,
- "FwComputeContextState",
- &psComputeData->psContextStateMemDesc);
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to allocate firmware GPU context suspend state (%d)",
- __func__,
- eError));
- goto fail_contextsuspendalloc;
- }
-
- ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88);
- ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88);
- eError = FWCommonContextAllocate(psConnection,
- psDeviceNode,
- REQ_TYPE_CDM,
- RGXFWIF_DM_CDM,
- psServerMMUContext,
- psAllocatedMemDesc,
- ui32AllocatedOffset,
- psFWMemContextMemDesc,
- psComputeData->psContextStateMemDesc,
- ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2,
- ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2,
- ui32ContextFlags,
- ui32Priority,
- ui32MaxDeadlineMS,
- ui64RobustnessAddress,
- psInfo,
- &psComputeData->psServerCommonContext);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to init Compute fw common context (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_computecommoncontext;
- }
-
- /*
- * Dump the FW compute context suspend state buffer
- */
- PDUMPCOMMENT(psDeviceNode, "Dump the compute context suspend state buffer");
- DevmemPDumpLoadMem(psComputeData->psContextStateMemDesc,
- 0,
- sizeof(RGXFWIF_COMPUTECTX_STATE),
- PDUMP_FLAGS_CONTINUOUS);
-
- psComputeData->ui32Priority = ui32Priority;
- return PVRSRV_OK;
-
-fail_computecommoncontext:
- DevmemFree(psComputeData->psContextStateMemDesc);
-fail_contextsuspendalloc:
- PVR_ASSERT(eError != PVRSRV_OK);
-
- return eError;
-}
-
-static
-PVRSRV_ERROR _DestroyComputeContext(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
-{
- RGX_SERVER_CC_CMP_DATA *psComputeData = &psComputeContext->sComputeData;
- PVRSRV_DEVICE_NODE *psDeviceNode = psComputeContext->psDeviceNode;
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- PVRSRV_ERROR eError;
-
- /* Check if the FW has finished with this resource ... */
- eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
- psComputeData->psServerCommonContext,
- RGXFWIF_DM_CDM,
- PDUMP_FLAGS_NONE);
- if (eError == PVRSRV_ERROR_RETRY)
- {
- return eError;
- }
- else if (eError != PVRSRV_OK)
- {
- PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- return eError;
- }
-
- /* ... it has so we can free its resources */
-
- /* Remove from node list before freeing. */
- OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
- dllist_remove_node(&(psComputeContext->sListNode));
- OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
-
- FWCommonContextFree(psComputeData->psServerCommonContext);
- DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psComputeData->psContextStateMemDesc);
- psComputeData->psServerCommonContext = NULL;
- return PVRSRV_OK;
- }
-
-PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
- IMG_UINT32 ui32FrameworkCommandSize,
- IMG_PBYTE pabyFrameworkCommand,
- IMG_HANDLE hMemCtxPrivData,
- IMG_UINT32 ui32StaticComputeContextStateSize,
- IMG_PBYTE pStaticComputeContextState,
- IMG_UINT32 ui32PackedCCBSizeU88,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT64 ui64RobustnessAddress,
- IMG_UINT32 ui32MaxDeadlineMS,
- RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext)
-{
- DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- RGX_SERVER_COMPUTE_CONTEXT *psComputeContext;
- RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
- PVRSRV_ERROR eError = PVRSRV_OK;
- RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext;
-
- /* Prepare cleanup struct */
- *ppsComputeContext = NULL;
-
- if (ui32StaticComputeContextStateSize > RGXFWIF_STATIC_COMPUTECONTEXT_SIZE)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
- if (psComputeContext == NULL)
- {
- return PVRSRV_ERROR_OUT_OF_MEMORY;
- }
-
- /*
- Create the FW compute context, this has the CDM common
- context embedded within it
- */
- eError = DevmemFwAllocate(psDevInfo,
- sizeof(RGXFWIF_FWCOMPUTECONTEXT),
- RGX_FWCOMCTX_ALLOCFLAGS,
- "FwComputeContext",
- &psComputeContext->psFWComputeContextMemDesc);
- if (eError != PVRSRV_OK)
- {
- goto fail_fwcomputecontext;
- }
-
- eError = OSLockCreate(&psComputeContext->hLock);
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to create lock (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_createlock;
- }
-
- psComputeContext->psDeviceNode = psDeviceNode;
-
- if (ui32FrameworkCommandSize)
- {
- eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
- &psComputeContext->psFWFrameworkMemDesc,
- ui32FrameworkCommandSize);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to allocate firmware GPU framework state (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_frameworkcreate;
- }
-
- /* Copy the Framework client data into the framework buffer */
- eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
- psComputeContext->psFWFrameworkMemDesc,
- pabyFrameworkCommand,
- ui32FrameworkCommandSize);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to populate the framework buffer (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_frameworkcopy;
- }
- sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
- }
-
- eError = _CreateComputeContext(psConnection,
- psDeviceNode,
- psComputeContext->psFWComputeContextMemDesc,
- offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext),
- hMemCtxPrivData,
- psFWMemContextMemDesc,
- ui32PackedCCBSizeU88,
- ui32ContextFlags,
- ui32Priority,
- ui64RobustnessAddress,
- ui32MaxDeadlineMS,
- &sInfo,
- &psComputeContext->sComputeData);
- if (eError != PVRSRV_OK)
- {
- goto fail_computecontext;
- }
-
- eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
- (void **)&psFWComputeContext);
- if (eError != PVRSRV_OK)
- {
- goto fail_acquire_cpu_mapping;
- }
-
- OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputeContextState, ui32StaticComputeContextStateSize);
- DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
- DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
-
-#if defined(SUPPORT_BUFFER_SYNC)
- psComputeContext->psBufferSyncContext =
- pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
- "rogue-cdm");
- if (IS_ERR(psComputeContext->psBufferSyncContext))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: failed to create buffer_sync context (err=%ld)",
- __func__, PTR_ERR(psComputeContext->psBufferSyncContext)));
-
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto fail_buffer_sync_context_create;
- }
-#endif
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
-#endif
-
- SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
- SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
-
- {
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
-
- OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
- dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
- OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
- }
-
- *ppsComputeContext = psComputeContext;
- return PVRSRV_OK;
-
-#if defined(SUPPORT_BUFFER_SYNC)
-fail_buffer_sync_context_create:
-#endif
-fail_acquire_cpu_mapping:
- FWCommonContextFree(psComputeContext->sComputeData.psServerCommonContext);
-fail_frameworkcopy:
-fail_computecontext:
- if (psComputeContext->psFWFrameworkMemDesc)
- {
- DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
- }
-fail_frameworkcreate:
- OSLockDestroy(psComputeContext->hLock);
-fail_createlock:
- DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
-fail_fwcomputecontext:
- OSFreeMem(psComputeContext);
- return eError;
-}
-
-PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
- PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext;
- IMG_UINT32 ui32WorkEstCCBSubmitted;
-
- eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc,
- (void **)&psFWComputeContext);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to map firmware compute context (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- return eError;
- }
-
- ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted;
-
- DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc);
-
- /* Check if all of the workload estimation CCB commands for this workload are read */
- if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)
- {
- PVR_DPF((PVR_DBG_WARNING,
- "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
- __func__, ui32WorkEstCCBSubmitted,
- psComputeContext->sWorkEstData.ui32WorkEstCCBReceived));
-
- return PVRSRV_ERROR_RETRY;
- }
-#endif
-
- eError = _DestroyComputeContext(psComputeContext);
- if (eError != PVRSRV_OK)
- {
- return eError;
- }
-
-#if defined(SUPPORT_BUFFER_SYNC)
- /* remove after RGXFWRequestCommonContextCleanUp() because we might return
- * RETRY and don't want to be calling this twice */
- if (psComputeContext->psBufferSyncContext != NULL)
- {
- pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext);
- psComputeContext->psBufferSyncContext = NULL;
- }
-#endif
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData);
-#endif
-
- if (psComputeContext->psFWFrameworkMemDesc != NULL)
- {
- DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
- }
- DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc);
-
- OSLockDestroy(psComputeContext->hLock);
- OSFreeMem(psComputeContext);
-
- return PVRSRV_OK;
-}
-
-
-PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32ClientUpdateCount,
- SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock,
- IMG_UINT32 *paui32ClientUpdateSyncOffset,
- IMG_UINT32 *paui32ClientUpdateValue,
- PVRSRV_FENCE iCheckFence,
- PVRSRV_TIMELINE iUpdateTimeline,
- PVRSRV_FENCE *piUpdateFence,
- IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
- IMG_UINT32 ui32CmdSize,
- IMG_PBYTE pui8DMCmd,
- IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32SyncPMRCount,
- IMG_UINT32 *paui32SyncPMRFlags,
- PMR **ppsSyncPMRs,
- IMG_UINT32 ui32NumWorkgroups,
- IMG_UINT32 ui32NumWorkitems,
- IMG_UINT64 ui64DeadlineInus)
-{
- RGXFWIF_KCCB_CMD sCmpKCCBCmd;
- RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
- PVRSRV_ERROR eError;
- PVRSRV_ERROR eError2;
- IMG_UINT32 ui32CDMCmdOffset = 0;
- PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->sComputeData.psServerCommonContext);
- RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext);
- IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
- IMG_UINT32 ui32FWCtx;
- IMG_BOOL bCCBStateOpen = IMG_FALSE;
-
- PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
- PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
- PRGXFWIF_UFO_ADDR pRMWUFOAddr;
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0};
- IMG_UINT32 ui32CDMWorkloadDataRO = 0;
- IMG_UINT32 ui32CDMCmdHeaderOffset = 0;
- IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0;
- RGX_WORKLOAD sWorkloadCharacteristics = {0};
-#endif
-
- IMG_UINT64 ui64FBSCEntryMask;
- IMG_UINT32 ui32IntClientFenceCount = 0;
- PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
- IMG_UINT32 ui32IntClientUpdateCount = 0;
- PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
- IMG_UINT32 *paui32IntUpdateValue = NULL;
- PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
- IMG_UINT64 uiCheckFenceUID = 0;
- IMG_UINT64 uiUpdateFenceUID = 0;
- PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
- PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
- IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
- IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
- PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
- IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
- void *pvUpdateFenceFinaliseData = NULL;
-
-#if defined(SUPPORT_BUFFER_SYNC)
- struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
- PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
- IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
- PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
-
- CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0);
-
- if (iUpdateTimeline >= 0 && !piUpdateFence)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
- /* Ensure we haven't been given a null ptr to
- * update values if we have been told we
- * have updates
- */
- if (ui32ClientUpdateCount > 0)
- {
- PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
- "paui32ClientUpdateValue NULL but "
- "ui32ClientUpdateCount > 0",
- PVRSRV_ERROR_INVALID_PARAMS);
- }
-
- /* Ensure the string is null-terminated (Required for safety) */
- pszUpdateFenceName[31] = '\0';
-
- OSLockAcquire(psComputeContext->hLock);
-
- eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
- 0,
- NULL,
- NULL);
- if (eError != PVRSRV_OK)
- {
- goto err_populate_sync_addr_list;
- }
-
- ui32IntClientUpdateCount = ui32ClientUpdateCount;
-
- eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
- ui32ClientUpdateCount,
- pauiClientUpdateUFODevVarBlock,
- paui32ClientUpdateSyncOffset);
- if (eError != PVRSRV_OK)
- {
- goto err_populate_sync_addr_list;
- }
- if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
- {
- pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
- }
- paui32IntUpdateValue = paui32ClientUpdateValue;
-
- if (ui32SyncPMRCount != 0)
- {
-#if defined(SUPPORT_BUFFER_SYNC)
- int err;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling "
- "pvr_buffer_sync_resolve_and_create_fences", __func__));
-
- err = pvr_buffer_sync_resolve_and_create_fences(
- psComputeContext->psBufferSyncContext,
- psComputeContext->psDeviceNode->hSyncCheckpointContext,
- ui32SyncPMRCount,
- ppsSyncPMRs,
- paui32SyncPMRFlags,
- &ui32BufferFenceSyncCheckpointCount,
- &apsBufferFenceSyncCheckpoints,
- &psBufferUpdateSyncCheckpoint,
- &psBufferSyncData
- );
-
- if (unlikely(err))
- {
- switch (err)
- {
- case -EINTR:
- eError = PVRSRV_ERROR_RETRY;
- break;
- case -ENOMEM:
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- break;
- default:
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- break;
- }
-
- if (eError != PVRSRV_ERROR_RETRY)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: "
- "pvr_buffer_sync_resolve_and_create_fences failed (%d)",
- __func__, eError));
- }
-
- goto fail_resolve_input_fence;
- }
-
- /* Append buffer sync fences */
- if (ui32BufferFenceSyncCheckpointCount > 0)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints "
- "to CDM Fence (&psTransferContext->sSyncAddrListFence=<%p>, "
- "pauiIntFenceUFOAddress=<%p>)...", __func__,
- ui32BufferFenceSyncCheckpointCount,
- (void *) &psComputeContext->sSyncAddrListFence ,
- (void *) pauiIntFenceUFOAddress));
-
- SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence,
- ui32BufferFenceSyncCheckpointCount,
- apsBufferFenceSyncCheckpoints);
- if (pauiIntFenceUFOAddress == NULL)
- {
- pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
- }
- ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
- }
-
- /* Append the update (from output fence) */
- if (psBufferUpdateSyncCheckpoint)
- {
- SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
- 1, &psBufferUpdateSyncCheckpoint);
- if (pauiIntUpdateUFOAddress == NULL)
- {
- pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
- }
- ui32IntClientUpdateCount++;
- }
-#else /* defined(SUPPORT_BUFFER_SYNC) */
- PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers",
- __func__, ui32SyncPMRCount));
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto err_populate_sync_addr_list;
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
- }
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
- /* Resolve the sync checkpoints that make up the input fence */
- eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
- iCheckFence,
- &ui32FenceSyncCheckpointCount,
- &apsFenceSyncCheckpoints,
- &uiCheckFenceUID, ui32PDumpFlags);
- if (eError != PVRSRV_OK)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
- goto fail_free_buffer_sync_data;
- }
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32FenceSyncCheckpointCount > 0)
- {
- IMG_UINT32 ii;
- for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
- {
- PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
- CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
- }
- }
-#endif
- /* Create the output fence (if required) */
- if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
- eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
- pszUpdateFenceName,
- iUpdateTimeline,
- psComputeContext->psDeviceNode->hSyncCheckpointContext,
- &iUpdateFence,
- &uiUpdateFenceUID,
- &pvUpdateFenceFinaliseData,
- &psUpdateSyncCheckpoint,
- (void*)&psFenceTimelineUpdateSync,
- &ui32FenceTimelineUpdateValue,
- ui32PDumpFlags);
- if (eError != PVRSRV_OK)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
- goto fail_create_output_fence;
- }
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
- /* Append the sync prim update for the timeline (if required) */
- if (psFenceTimelineUpdateSync)
- {
- IMG_UINT32 *pui32TimelineUpdateWp = NULL;
-
- /* Allocate memory to hold the list of update values (including our timeline update) */
- pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
- if (!pui32IntAllocatedUpdateValues)
- {
- /* Failed to allocate memory */
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_update_values_mem;
- }
- OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
- /* Copy the update values into the new memory, then append our timeline update value */
- if (paui32IntUpdateValue)
- {
- OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
- }
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32IntClientUpdateCount > 0)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- /* Now set the additional update value */
- pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
- *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
- ui32IntClientUpdateCount++;
- /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
- paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync));
- /* Now append the timeline sync prim addr to the compute context update list */
- SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
- psFenceTimelineUpdateSync);
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32IntClientUpdateCount > 0)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
- paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
- }
- }
-
- /* Append the checks (from input fence) */
- if (ui32FenceSyncCheckpointCount > 0)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32IntClientUpdateCount > 0)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
- ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
- if (!pauiIntFenceUFOAddress)
- {
- pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
- }
- ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
- }
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32IntClientUpdateCount > 0)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
- CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
-
- if (psUpdateSyncCheckpoint)
- {
- /* Append the update (from output fence) */
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
- SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
- 1,
- &psUpdateSyncCheckpoint);
- if (!pauiIntUpdateUFOAddress)
- {
- pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
- }
- ui32IntClientUpdateCount++;
-#if defined(CMP_CHECKPOINT_DEBUG)
- if (ui32IntClientUpdateCount > 0)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- }
- CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
-
-#if (ENABLE_CMP_UFO_DUMP == 1)
- PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
- {
- IMG_UINT32 ii;
- PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
- PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
- IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
-
- /* Dump Fence syncs and Update syncs */
- PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
- for (ii=0; ii<ui32IntClientFenceCount; ii++)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
- psTmpIntFenceUFOAddress++;
- }
- PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
- for (ii=0; ii<ui32IntClientUpdateCount; ii++)
- {
- if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
- pui32TmpIntUpdateValue++;
- }
- psTmpIntUpdateUFOAddress++;
- }
- }
-#endif
- RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
- &pPreAddr,
- &pPostAddr,
- &pRMWUFOAddr);
- /*
- * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
- * in other words, take the value and set it to zero afterwards.
- * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
- * as it must be ready at the time of context activation.
- */
- {
- eError = RGXExtractFBSCEntryMaskFromMMUContext(psComputeContext->psDeviceNode,
- FWCommonContextGetServerMMUCtx(psComputeContext->sComputeData.psServerCommonContext),
- &ui64FBSCEntryMask);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
- goto fail_cmdinvalfbsc;
- }
- }
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups;
- sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems;
-
- /* Prepare workload estimation */
- WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice,
- &psComputeContext->sWorkEstData,
- &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
- RGXFWIF_CCB_CMD_TYPE_CDM,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickDataCompute);
-#endif
-
- RGXCmdHelperInitCmdCCB(psDevInfo,
- psClientCCB,
- ui64FBSCEntryMask,
- ui32IntClientFenceCount,
- pauiIntFenceUFOAddress,
- NULL,
- ui32IntClientUpdateCount,
- pauiIntUpdateUFOAddress,
- paui32IntUpdateValue,
- ui32CmdSize,
- pui8DMCmd,
- &pPreAddr,
- &pPostAddr,
- &pRMWUFOAddr,
- RGXFWIF_CCB_CMD_TYPE_CDM,
- ui32ExtJobRef,
- ui32IntJobRef,
- ui32PDumpFlags,
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- &sWorkloadKickDataCompute,
-#else
- NULL,
-#endif
- "Compute",
- bCCBStateOpen,
- asCmdHelperData);
-
- eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
- if (eError != PVRSRV_OK)
- {
- goto fail_cmdaquire;
- }
-
-
- /*
- We should reserve space in the kernel CCB here and fill in the command
- directly.
- This is so if there isn't space in the kernel CCB we can return with
- retry back to services client before we take any operations
- */
-
- /*
- We might only be kicking for flush out a padding packet so only submit
- the command if the create was successful
- */
- if (eError == PVRSRV_OK)
- {
- /*
- All the required resources are ready at this point, we can't fail so
- take the required server sync operations and commit all the resources
- */
-
- ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
- RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr);
- }
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* The following is used to determine the offset of the command header containing
- the workload estimation data so that can be accessed when the KCCB is read */
- ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
-
- ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->sComputeData.psServerCommonContext));
-
- /* This checks if the command would wrap around at the end of the CCB and
- * therefore would start at an offset of 0 rather than the current command
- * offset */
- if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck)
- {
- ui32CDMWorkloadDataRO = ui32CDMCmdOffset;
- }
- else
- {
- ui32CDMWorkloadDataRO = 0;
- }
-#endif
-
- /* Construct the kernel compute CCB command. */
- sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
- sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
- sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
- sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
- sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
-
- /* Add the Workload data into the KCCB kick */
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Store the offset to the CCCB command header so that it can be referenced
- * when the KCCB command reaches the FW */
- sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset;
-#else
- sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
-#endif
-
- ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext).ui32Addr;
-
- if (psComputeCmdCmn)
- {
- HTBLOGK(HTB_SF_MAIN_KICK_CDM,
- sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
- ui32CDMCmdOffset,
- psComputeCmdCmn->ui32FrameNum,
- ui32ExtJobRef,
- ui32IntJobRef
- );
- }
-
- RGXSRV_HWPERF_ENQ(psComputeContext, OSGetCurrentClientProcessIDKM(),
- ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_CDM,
- iCheckFence,
- iUpdateFence,
- iUpdateTimeline,
- uiCheckFenceUID,
- uiUpdateFenceUID,
- NO_DEADLINE,
- NO_CYCEST);
-
- /*
- * Submit the compute command to the firmware.
- */
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
- RGXFWIF_DM_CDM,
- &sCmpKCCBCmd,
- ui32PDumpFlags);
- if (eError2 != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- if (eError2 != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s failed to schedule kernel CCB command (%s)",
- __func__,
- PVRSRVGetErrorString(eError2)));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
- }
- else
- {
- PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
- ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_CDM);
- }
- /*
- * Now check eError (which may have returned an error from our earlier call
- * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
- * so we check it now...
- */
- if (eError != PVRSRV_OK )
- {
- goto fail_cmdaquire;
- }
-
-#if defined(NO_HARDWARE)
- /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
- if (psUpdateSyncCheckpoint)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
- SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
- }
- if (psFenceTimelineUpdateSync)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
- SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
- }
- SyncCheckpointNoHWUpdateTimelines(NULL);
-#endif /* defined(NO_HARDWARE) */
-
-#if defined(SUPPORT_BUFFER_SYNC)
- if (psBufferSyncData)
- {
- pvr_buffer_sync_kick_succeeded(psBufferSyncData);
- }
- if (apsBufferFenceSyncCheckpoints)
- {
- kfree(apsBufferFenceSyncCheckpoints);
- }
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
-
- *piUpdateFence = iUpdateFence;
-
- if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
- {
- SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence,
- pvUpdateFenceFinaliseData,
- psUpdateSyncCheckpoint, pszUpdateFenceName);
- }
- /* Drop the references taken on the sync checkpoints in the
- * resolved input fence */
- SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
- /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
- if (apsFenceSyncCheckpoints)
- {
- SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
- }
- /* Free memory allocated to hold the internal list of update values */
- if (pui32IntAllocatedUpdateValues)
- {
- OSFreeMem(pui32IntAllocatedUpdateValues);
- pui32IntAllocatedUpdateValues = NULL;
- }
-
- OSLockRelease(psComputeContext->hLock);
-
- return PVRSRV_OK;
-
-fail_cmdaquire:
-fail_cmdinvalfbsc:
- SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
- SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
-fail_alloc_update_values_mem:
- if (iUpdateFence != PVRSRV_NO_FENCE)
- {
- SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
- }
-fail_create_output_fence:
- /* Drop the references taken on the sync checkpoints in the
- * resolved input fence */
- SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
-
-fail_free_buffer_sync_data:
-#if defined(SUPPORT_BUFFER_SYNC)
- if (psBufferSyncData)
- {
- pvr_buffer_sync_kick_failed(psBufferSyncData);
- }
- if (apsBufferFenceSyncCheckpoints)
- {
- kfree(apsBufferFenceSyncCheckpoints);
- }
-
-fail_resolve_input_fence:
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
-
-err_populate_sync_addr_list:
- /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
- if (apsFenceSyncCheckpoints)
- {
- SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
- }
- /* Free memory allocated to hold the internal list of update values */
- if (pui32IntAllocatedUpdateValues)
- {
- OSFreeMem(pui32IntAllocatedUpdateValues);
- pui32IntAllocatedUpdateValues = NULL;
- }
- OSLockRelease(psComputeContext->hLock);
- return eError;
-}
-
-PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
-{
- RGXFWIF_KCCB_CMD sFlushCmd;
- PVRSRV_ERROR eError = PVRSRV_OK;
- IMG_UINT32 ui32kCCBCommandSlot;
- PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
-
-#if defined(PDUMP)
- PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode,
- PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
-#endif
- sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
- sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Size = 0;
- sFlushCmd.uCmdData.sSLCFlushInvalData.ui64Address = 0;
- sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
- sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
- sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
-
- OSLockAcquire(psComputeContext->hLock);
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
- RGXFWIF_DM_CDM,
- &sFlushCmd,
- PDUMP_FLAGS_CONTINUOUS,
- &ui32kCCBCommandSlot);
- /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */
- if ((eError != PVRSRV_ERROR_RETRY) &&
- (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- if (eError != PVRSRV_OK)
- {
- /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */
- if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
- {
- PVR_DPF((PVR_DBG_WARNING,
- "%s: Returning RETRY to caller", __func__));
- eError = PVRSRV_ERROR_RETRY;
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to schedule SLC flush command (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- }
- }
- else
- {
- /* Wait for the SLC flush to complete */
- eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Compute flush aborted (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- }
- else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] &
- RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE))
- {
- PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__));
- }
- }
-
- OSLockRelease(psComputeContext->hLock);
- return eError;
-}
-
-
-PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
-{
- RGXFWIF_KCCB_CMD sKCCBCmd;
- PVRSRV_ERROR eError;
-
- OSLockAcquire(psComputeContext->hLock);
-
- /* Schedule the firmware command */
- sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
- sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->sComputeData.psServerCommonContext);
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
- RGXFWIF_DM_CDM,
- &sKCCBCmd,
- PDUMP_FLAGS_NONE);
- if (eError != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to schedule the FW command %d (%s)",
- __func__,
- eError,
- PVRSRVGETERRORSTRING(eError)));
- }
-
- OSLockRelease(psComputeContext->hLock);
-
- return eError;
-}
-
-
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE * psDeviceNode,
- RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32Priority)
-{
- PVRSRV_ERROR eError;
-
- PVR_UNREFERENCED_PARAMETER(psDeviceNode);
-
- OSLockAcquire(psComputeContext->hLock);
-
- eError = ContextSetPriority(psComputeContext->sComputeData.psServerCommonContext,
- psConnection,
- psComputeContext->psDeviceNode->pvDevice,
- ui32Priority,
- RGXFWIF_DM_CDM);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
- }
-
- OSLockRelease(psComputeContext->hLock);
- return eError;
-}
-
-/*
- * PVRSRVRGXSetComputeContextPropertyKM
- */
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- RGX_CONTEXT_PROPERTY eContextProperty,
- IMG_UINT64 ui64Input,
- IMG_UINT64 *pui64Output)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- switch (eContextProperty)
- {
- case RGX_CONTEXT_PROPERTY_FLAGS:
- {
- IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
-
- OSLockAcquire(psComputeContext->hLock);
- eError = FWCommonContextSetFlags(psComputeContext->sComputeData.psServerCommonContext,
- ui32ContextFlags);
- OSLockRelease(psComputeContext->hLock);
- break;
- }
-
- default:
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
- eError = PVRSRV_ERROR_NOT_SUPPORTED;
- }
- }
-
- return eError;
-}
-
-void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
- DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- IMG_UINT32 ui32VerbLevel)
-{
- DLLIST_NODE *psNode, *psNext;
- OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
- dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
- {
- RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
- IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
- DumpFWCommonContextInfo(psCurrentServerComputeCtx->sComputeData.psServerCommonContext,
- pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
- }
- OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
-}
-
-IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- IMG_UINT32 ui32ContextBitMask = 0;
- DLLIST_NODE *psNode, *psNext;
- OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
- dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
- {
- RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
- IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
-
- if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->sComputeData.psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
- == PVRSRV_ERROR_CCCB_STALLED)
- {
- ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
- }
- }
- OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
- return ui32ContextBitMask;
-}
-
-/*
- * PVRSRVRGXGetLastDeviceErrorKM
- */
-PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 *ui32Error)
-{
- PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- PVR_UNREFERENCED_PARAMETER(psConnection);
-
- *ui32Error = psDevInfo->eLastDeviceError;
- psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE;
- return PVRSRV_OK;
-}
-
-/******************************************************************************
- End of file (rgxcompute.c)
-******************************************************************************/
+++ /dev/null
-/*************************************************************************/ /*!
-@File
-@Title RGX compute functionality
-@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-@Description Header for the RGX compute functionality
-@License Dual MIT/GPLv2
-
-The contents of this file are subject to the MIT license as set out below.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-Alternatively, the contents of this file may be used under the terms of
-the GNU General Public License Version 2 ("GPL") in which case the provisions
-of GPL are applicable instead of those above.
-
-If you wish to allow use of your version of this file only under the terms of
-GPL, and not to allow others to use your version of this file under the terms
-of the MIT license, indicate your decision by deleting the provisions above
-and replace them with the notice and other provisions required by GPL as set
-out in the file called "GPL-COPYING" included in this distribution. If you do
-not delete the provisions above, a recipient may use your version of this file
-under the terms of either the MIT license or GPL.
-
-This License is also included in this distribution in the file called
-"MIT-COPYING".
-
-EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/ /**************************************************************************/
-
-#if !defined(RGXCOMPUTE_H)
-#define RGXCOMPUTE_H
-
-#include "devicemem.h"
-#include "device.h"
-#include "rgxfwutils.h"
-#include "rgx_fwif_resetframework.h"
-#include "rgxdebug.h"
-#include "pvr_notifier.h"
-
-#include "sync_server.h"
-#include "sync_internal.h"
-#include "connection_server.h"
-
-
-typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXCreateComputeContextKM
-
- @Description
- Creates a RGX device context for submitting commands to CDM.
-
- @Input pvDeviceNode - Services-managed device
- @Input ui32Priority - Scheduling priority for commands on this context
- @Input hMemCtxPrivData - private data
- @Input ui32PackedCCBSizeU88 - packed CCB size. The first byte contains the
- log2 CCB size and the second byte the log2 maximum CCB size.
- @Input ui32ComputeCtxSwitchSize - Context control size
- @Input pComputeCtxSwitch_Regs - Context control registers
- @Output ppsComputeContext - cleanup data
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
- IMG_UINT32 ui32FrameworkCommandSize,
- IMG_PBYTE pabyFrameworkCommand,
- IMG_HANDLE hMemCtxPrivData,
- IMG_UINT32 ui32StaticComputeContextStateSize,
- IMG_PBYTE pStaticComputeContextState,
- IMG_UINT32 ui32PackedCCBSizeU88,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT64 ui64RobustnessAddress,
- IMG_UINT32 ui32MaxDeadlineMS,
- RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext);
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXDestroyComputeContextKM
-
- @Description
- Server-side implementation of RGXDestroyComputeContext
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXKickCDMKM
-
- @Description
- Server-side implementation of RGXKickCDM
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32ClientUpdateCount,
- SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock,
- IMG_UINT32 *paui32ClientUpdateSyncOffset,
- IMG_UINT32 *paui32ClientUpdateValue,
- PVRSRV_FENCE iCheckFence,
- PVRSRV_TIMELINE iUpdateTimeline,
- PVRSRV_FENCE *piUpdateFence,
- IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
- IMG_UINT32 ui32CmdSize,
- IMG_PBYTE pui8DMCmd,
- IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32SyncPMRCount,
- IMG_UINT32 *paui32SyncPMRFlags,
- PMR **ppsSyncPMRs,
- IMG_UINT32 ui32NumWorkgroups,
- IMG_UINT32 ui32NumWorkitems,
- IMG_UINT64 ui64DeadlineInus);
-
-/*!
-*******************************************************************************
- @Function PVRSRVRGXFlushComputeDataKM
-
- @Description
- Server-side implementation of RGXFlushComputeData
-
- @Input psComputeContext - Compute context to flush
-
- @Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-/*!
-*******************************************************************************
-
- @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
- @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
-
- @Input psComputeContext - Compute context to flush
-
- @Return PVRSRV_ERROR
-
-******************************************************************************/
-PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
-
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- IMG_UINT32 ui32Priority);
-
-PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
- RGX_CONTEXT_PROPERTY eContextProperty,
- IMG_UINT64 ui64Input,
- IMG_UINT64 *pui64Output);
-
-PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 *ui32Error);
-
-/* Debug - Dump debug info of compute contexts on this device */
-void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
- DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- IMG_UINT32 ui32VerbLevel);
-
-/* Debug/Watchdog - check if client compute contexts are stalled */
-IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
-
-#endif /* RGXCOMPUTE_H */
#include "fwtrace_string.h"
#include "rgxfwimageutils.h"
#include "fwload.h"
+#include "debug_common.h"
#include "rgxta3d.h"
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#include "rgxkicksync.h"
+#endif
#include "rgxcompute.h"
#include "rgxtdmtransfer.h"
#include "rgxtimecorr.h"
PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \
PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \
(x).ui32DDKBuild, \
- ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+ ((x).ui32BuildOptions & OPTIONS_DEBUG_EN) ? "debug":"release", \
(x).ui32BuildOptions);
#define DD_SUMMARY_INDENT ""
#undef X
};
-typedef struct _IMG_FLAGS2DESC_
-{
- IMG_UINT32 uiFlag;
- const IMG_CHAR *pszLabel;
-} IMG_FLAGS2DESC;
-
static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] =
{
"offline",
The function will query DevicememHistory for information about the faulting page, as well
as the page before and after.
+ @Input psDeviceNode - The device which this allocation search should be made on
@Input uiPID - The process ID to search for allocations belonging to
@Input sFaultDevVAddr - The device address to search for allocations at/before/after
@Input asQueryOut - Storage for the query results
@Return IMG_BOOL - IMG_TRUE if any results were found for this page fault
******************************************************************************/
-static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+static IMG_BOOL _GetDevicememHistoryData(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
IMG_UINT32 ui32PageSizeBytes)
{
sQueryIn.uiPID = uiPID;
}
+ sQueryIn.psDevNode = psDeviceNode;
/* Query the DevicememHistory for all allocations in the previous page... */
sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes;
if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING],
******************************************************************************/
static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ PVRSRV_DEVICE_NODE *psDevNode,
void *pvDumpDebugFile,
FAULT_INFO *psInfo,
const IMG_CHAR* pszIndent)
else
{
PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent);
+ DevicememHistoryDumpRecordStats(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ PVR_DUMPDEBUG_LOG("%s Records Searched -"
+ " PP:%"IMG_UINT64_FMTSPEC
+ " FP:%"IMG_UINT64_FMTSPEC
+ " NP:%"IMG_UINT64_FMTSPEC,
+ pszIndent,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING].ui64SearchCount,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED].ui64SearchCount,
+ psInfo->asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT].ui64SearchCount);
}
}
else
{
/* get any DevicememHistory data for the faulting address */
- bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+ bHits = _GetDevicememHistoryData(psDevInfo->psDeviceNode,
+ sProcessInfo.uiPID,
sFaultDevVAddr,
psInfo->asQueryOut,
ui32PageSizeBytes);
static const IMG_FLAGS2DESC asMisc2Description[] =
{
{RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"},
+ {RGXFWIF_INICFG_SPU_CLOCK_GATE, " SPU Clock Gating (requires Power Rascal/Dust);"},
{RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"},
{RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"},
{RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"},
}
#endif // !defined(NO_HARDWARE)
-/*
- Appends flags strings to a null-terminated string buffer - each flag
- description string starts with a space.
-*/
-static void _Flags2Description(IMG_CHAR *psDesc,
- IMG_UINT32 ui32DescSize,
- const IMG_FLAGS2DESC *psConvTable,
- IMG_UINT32 ui32TableSize,
- IMG_UINT32 ui32Flags)
-{
- IMG_UINT32 ui32Idx;
-
- for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
- {
- if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
- {
- OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize);
- }
- }
-}
-
/*
* Translate ID code to descriptive string.
* Returns on the first match.
OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
- _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
- _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
}
static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
- _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
+ DebugCommonFlagStrings(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
}
}
else
{
- _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE,
asDmState2Description, ARRAY_SIZE(asDmState2Description),
ui32HWRRecoveryFlags);
}
OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
{
- PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
aui8RecoveryNum,
psHWRInfo->ui32CoreID,
psHWRInfo->ui32PID,
+ psHWRInfo->szProcName,
psHWRInfo->ui32FrameNum,
psHWRInfo->ui32ActiveHWRTData,
psHWRInfo->ui32EventStatus,
}
else
{
- PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ PVR_DUMPDEBUG_LOG(" %s PID = %u / %s, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
aui8RecoveryNum,
psHWRInfo->ui32PID,
+ psHWRInfo->szProcName,
psHWRInfo->ui32FrameNum,
psHWRInfo->ui32ActiveHWRTData,
psHWRInfo->ui32EventStatus,
}
}
+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 65104))
+ {
+ PVR_DUMPDEBUG_LOG(" Active PDS DM USCs = 0x%08x", psHWRInfo->ui32PDSActiveDMUSCs);
+ }
+
+ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 69700))
+ {
+ PVR_DUMPDEBUG_LOG(" DMs stalled waiting on PDS Store space = 0x%08x", psHWRInfo->ui32PDSStalledDMs);
+ }
+
switch (psHWRInfo->eHWRType)
{
case RGX_HWRTYPE_ECCFAULT:
sFaultDevVAddr.uiAddr <<= 4; /* align shift */
ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS1_CONTEXT_CLRMSK) >>
RGX_CR_MMU_FAULT_STATUS1_CONTEXT_SHIFT;
-#if defined(SUPPORT_TRUSTED_DEVICE)
- ui32PC = ui32PC - 1;
-#endif
bPMFault = (ui32PC <= 8);
sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED)
{
- _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
+ _PrintFaultInfo(pfnDumpDebugPrintf, psDevInfo->psDeviceNode, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT);
}
OSLockRelease(psDevInfo->hDebugFaultInfoLock);
IMG_CHAR *pszState, *pszReason;
const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
/* space for the current clock speed and 3 previous */
RGXFWIF_TIME_CORR asTimeCorrs[4];
IMG_DEV_PHYADDR sPCDevPAddr;
MMU_FAULT_DATA sFaultData;
IMG_BOOL bIsValid;
+ IMG_UINT32 ui32CBaseMapCtxReg;
+
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4;
+
+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase);
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, ui32CatBase);
+ ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1);
+ sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK)
+ >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT);
+ bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__INVALID_EN);
+ }
+ else
+ {
+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT;
- ui64CBaseMapping = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING);
- sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK)
- >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
- << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
- bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN);
+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32CBaseMapCtxReg, ui32CatBase);
+
+ ui64CBaseMapping = OSReadUncheckedHWReg64(psDevInfo->pvSecureRegsBaseKM, RGX_CR_MMU_CBASE_MAPPING);
+ sPCDevPAddr.uiAddr = (((ui64CBaseMapping & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK)
+ >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT);
+ bIsValid = !(ui64CBaseMapping & RGX_CR_MMU_CBASE_MAPPING_INVALID_EN);
+ }
PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
" on cat base %u. PC Addr = 0x%llX is %s",
#if !defined(NO_HARDWARE)
/* Determine the type virtualisation support used */
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation");
#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* (RGX_NUM_DRIVERS_SUPPORTED > 1) */
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1))
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo);
}
#endif
-#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
if (!PVRSRV_VZ_MODE_IS(NATIVE))
{
IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo);
if (!PVRSRV_VZ_MODE_IS(GUEST))
{
IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE];
- IMG_BOOL bOsIsolationEnabled = IMG_FALSE;
+ IMG_BOOL bDriverIsolationEnabled = IMG_FALSE;
+ IMG_UINT32 ui32HostIsolationGroup;
if (psFwSysData == NULL)
{
sHwrStateDescription[0] = '\0';
- _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sHwrStateDescription, RGX_DEBUG_STR_SIZE,
asHwrState2Description, ARRAY_SIZE(asHwrState2Description),
psFwSysData->ui32HWRStateFlags);
PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription);
PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)",
- pszPowStateName[psFwSysData->ePowState],
- (psDevInfo->pvAPMISRData)?"enabled":"disabled",
- psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqDenied,
- psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqRetry,
- psDevInfo->ui32ActivePMReqTotal -
- psDevInfo->ui32ActivePMReqOk -
- psDevInfo->ui32ActivePMReqDenied -
- psDevInfo->ui32ActivePMReqRetry -
- psDevInfo->ui32ActivePMReqNonIdle,
- psDevInfo->ui32ActivePMReqTotal,
- psRuntimeCfg->ui32ActivePMLatencyms);
+ (psFwSysData->ePowState < ARRAY_SIZE(pszPowStateName) ? pszPowStateName[psFwSysData->ePowState] : "???"),
+ (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+ psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqRetry,
+ psDevInfo->ui32ActivePMReqTotal -
+ psDevInfo->ui32ActivePMReqOk -
+ psDevInfo->ui32ActivePMReqDenied -
+ psDevInfo->ui32ActivePMReqRetry -
+ psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqTotal,
+ psRuntimeCfg->ui32ActivePMLatencyms);
ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
asTimeCorrs[3].ui64OSTimeStamp);
}
- for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ ui32HostIsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID];
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
{
- RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+ RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID];
+ IMG_UINT32 ui32IsolationGroup = psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID];
+ IMG_BOOL bMTSEnabled = IMG_FALSE;
- IMG_BOOL bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ?
- IMG_TRUE : ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0);
+#if !defined(NO_HARDWARE)
+ if (bRGXPoweredON)
+ {
+ bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? IMG_TRUE :
+ ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32DriverID)) != 0);
+ }
+#endif
- PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid,
+ PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %u; Isolation group: %u; %s", ui32DriverID,
apszFwOsStateName[sFwRunFlags.bfOsState],
(sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok",
(sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "",
- psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid],
- (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "",
+ psDevInfo->psRGXFWIfRuntimeCfg->aui32DriverPriority[ui32DriverID],
+ ui32IsolationGroup,
(bMTSEnabled) ? "MTS on;" : "MTS off;"
);
- bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS;
+ if (ui32IsolationGroup != ui32HostIsolationGroup)
+ {
+ bDriverIsolationEnabled = IMG_TRUE;
+ }
}
#if defined(PVR_ENABLE_PHR)
IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE];
sPHRConfigDescription[0] = '\0';
- _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
+ DebugCommonFlagStrings(sPHRConfigDescription, RGX_DEBUG_STR_SIZE,
asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description),
BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode));
}
#endif
- if (bOsIsolationEnabled)
+ if (bDriverIsolationEnabled)
{
PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS);
}
X(RGX_CR_META_SP_MSLVIRQENABLE) \
X(RGX_CR_META_SP_MSLVIRQLEVEL)
-#define RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES \
- X(RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES) \
- X(RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES) \
- X(RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES) \
- X(RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES) \
- X(RGX_CR_META_SP_MSLVIRQENABLE__META_REGISTER_UNPACKED_ACCESSES) \
- X(RGX_CR_META_SP_MSLVIRQLEVEL__META_REGISTER_UNPACKED_ACCESSES)
+#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_V1_AND_METAREG_UNPACKED_ACCESSES \
+ X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_V1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_V1_AND_METAREG_UNPACKED)
+
+#define RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_METAREG_UNPACKED_ACCESSES \
+ X(RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQENABLE__HOST_SECURITY_GT1_AND_METAREG_UNPACKED) \
+ X(RGX_CR_META_SP_MSLVIRQLEVEL__HOST_SECURITY_GT1_AND_METAREG_UNPACKED)
- IMG_UINT32 ui32Idx, ui32RegIdx;
+ IMG_UINT32 ui32Idx;
IMG_UINT32 ui32RegVal;
IMG_UINT32 ui32RegAddr;
RGX_META_SP_EXTRA_DEBUG
#undef X
};
- const IMG_UINT32 aui32DebugRegAddrUA[] = {
+ const IMG_UINT32 aui32DebugRegAddrUAHSV1[] = {
+#define X(A) A,
+ RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_V1_AND_METAREG_UNPACKED_ACCESSES
+#undef X
+ };
+
+ const IMG_UINT32 aui32DebugRegAddrUAHSGT1[] = {
#define X(A) A,
- RGX_META_SP_EXTRA_DEBUG__UNPACKED_ACCESSES
+ RGX_META_SP_EXTRA_DEBUG__HOST_SECURITY_GT1_AND_METAREG_UNPACKED_ACCESSES
#undef X
};
#undef X
};
- const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38};
-
PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
/* array of register offset values depends on feature. But don't augment names in apszDebugRegName */
- PVR_ASSERT(sizeof(aui32DebugRegAddrUA) == sizeof(aui32DebugRegAddr));
+ PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSGT1) == sizeof(aui32DebugRegAddr));
+ PVR_ASSERT(sizeof(aui32DebugRegAddrUAHSV1) == sizeof(aui32DebugRegAddr));
pui32DebugRegAddr = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ?
- aui32DebugRegAddrUA : aui32DebugRegAddr;
+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ? (aui32DebugRegAddrUAHSGT1) : (aui32DebugRegAddrUAHSV1)) : aui32DebugRegAddr;
- /* dump first set of Slave Port debug registers */
+ /* dump set of Slave Port debug registers */
for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
{
const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
ui32RegAddr = pui32DebugRegAddr[ui32Idx];
- ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ ui32RegVal = OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32RegAddr);
PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal);
}
- /* dump second set of Slave Port debug registers */
- for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
- ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
- PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
-
- }
-
- for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
- {
- ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
- for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
- ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
- PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
- }
- }
-
}
#endif /* !defined(NO_HARDWARE) */
/* Print the decoded log for each thread... */
for (tid = 0; tid < RGXFW_THREAD_NUM; tid++)
{
- volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+ volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32WrapCount);
volatile IMG_UINT32 *pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
IMG_UINT32 ui32HostWrapCount = *pui32FWWrapCount;
}
}
-#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
-
- /* Print the power monitoring counters... */
- if (psFwSysData != NULL)
- {
- const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer;
- IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer;
- IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords;
- IMG_UINT32 ui32Count = 0;
- IMG_UINT64 ui64Timestamp;
-
- if (pui32TraceBuf == NULL)
- {
- /* power monitoring buffer not yet allocated */
- return;
- }
-
- if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER)
- {
- PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available."));
- return;
- }
- ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 |
- (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]);
-
- /* Update the trace pointer... */
- ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords;
- ui32Count = (ui32Count + 3);
-
- PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x",
- pui32TraceBuf,
- ui32TracePtr,
- ui32PowerMonBufSizeInDWords));
-
- while (ui32Count < ui32PowerMonBufSizeInDWords)
- {
- /* power monitoring data is (register, value) dword pairs */
- PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x",
- ui64Timestamp,
- pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords],
- pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]);
-
- if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID ||
- pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID)
- {
- /* end of buffer */
- break;
- }
-
- /* Update the trace pointer... */
- ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords;
- ui32Count = (ui32Count + 4);
- }
- }
-}
-#endif
-
static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
{
switch (eDevState)
{
- case PVRSRV_DEVICE_STATE_INIT:
+ case PVRSRV_DEVICE_STATE_CREATING:
+ return "Creating";
+ case PVRSRV_DEVICE_STATE_CREATED:
return "Initialising";
case PVRSRV_DEVICE_STATE_ACTIVE:
return "Active";
#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
-#define DDLOG32UNPACKED(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R##__META_REGISTER_UNPACKED_ACCESSES));
#if !defined(NO_HARDWARE)
static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo)
{
-#if !defined(NO_HARDWARE)
+#if defined(NO_HARDWARE)
+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+ PVR_DUMPDEBUG_LOG("(Not supported for NO_HARDWARE builds)");
+
+ return PVRSRV_OK;
+#else
IMG_UINT32 ui32Meta = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
IMG_UINT32 ui32RegVal;
PVRSRV_ERROR eError;
IMG_BOOL bFirmwarePerf;
-#endif
IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT);
void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
-#if !defined(NO_HARDWARE)
/* Check if firmware perf was set at Init time */
bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE);
-#endif
DDLOG64(CORE_ID);
DDLOG64(SCRATCH15);
DDLOG32(IRQ_OS0_EVENT_STATUS);
-#if !defined(NO_HARDWARE)
if (ui32Meta)
{
IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE;
- if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
- {
- DDLOG32UNPACKED(META_SP_MSLVIRQSTATUS);
- }
- else
- {
- DDLOG32(META_SP_MSLVIRQSTATUS);
- }
+ IMG_UINT32 ui32MSlvIrqStatusReg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ?
+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ?
+ RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED :
+ RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED) :
+ RGX_CR_META_SP_MSLVIRQSTATUS;
+
+ PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, "META_SP_MSLVIRQSTATUS", OSReadUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvIrqStatusReg));
eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError);
eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
PVR_RETURN_IF_ERROR(eError);
}
-#endif
return PVRSRV_OK;
-#if !defined(NO_HARDWARE)
_METASPError:
PVR_DUMPDEBUG_LOG("Dump Slave Port debug information");
_RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
return eError;
-#endif
+#endif /* defined(NO_HARDWARE) */
}
#undef REG32_FMTSPEC
#undef DDLOG32_DPX
#undef DDLOG64_DPX
#undef DDLOGVAL32
-#undef DDLOG32UNPACKED
/*!
*******************************************************************************
}
if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
- (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED)))
{
PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
- (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount);
+ (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_DRIVERS_SUPPORTED), ui8FwOsCount);
}
PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
+ DevicememHistoryDumpRecordStats(psDevInfo->psDeviceNode, pfnDumpDebugPrintf, pvDumpDebugFile);
PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
psDevInfo->sDevFeatureCfg.ui32V,
psDevInfo->sDevFeatureCfg.ui32N,
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Dump out the Workload estimation CCB. */
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
+ /* Dump out the Workload estimation CCB. */
const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
if (psWorkEstCCBCtl != NULL)
if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST))
{
+#if !defined(NO_HARDWARE)
PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM);
PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Linear): 0x%p",
+ psDevInfo->pvSecureRegsBaseKM);
+ PVR_DUMPDEBUG_LOG("RGX Host Secure Register Base Address (Physical): 0x%08lX",
+ (unsigned long)psDevInfo->sRegsPhysBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET);
+ }
+
if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
{
+ IMG_UINT32 ui32MSlvCtrl1Reg = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES) ?
+ ((RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1) ?
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED :
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED) :
+ RGX_CR_META_SP_MSLVCTRL1;
+
/* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */
- if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES, 0x0);
- }
- else
- {
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
- }
+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32MSlvCtrl1Reg, 0x0);
}
+#endif
eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
if (eError != PVRSRV_OK)
}
else
{
- PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+ PVR_DUMPDEBUG_LOG("------[ FWCtxs Next CMD ]------");
}
DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
-
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
+#endif
}
}
void *pvDumpDebugFile,
PVRSRV_RGXDEV_INFO *psDevInfo);
-#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- PVRSRV_RGXDEV_INFO *psDevInfo);
-#endif
-
#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG)
/*!
*******************************************************************************
#define RGX_VAL_FBDC_SIG_CHECK_NOERR_EN (0x2U) /*!< Enable FBDC signature check. Signatures must match */
#define RGX_VAL_FBDC_SIG_CHECK_ERR_EN (0x4U) /*!< Enable FBDC signature check. Signatures must not match */
#define RGX_VAL_GPUSTATEPIN_EN (0x8U) /*!< Enable GPU state pin check */
-#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */
-#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. Signatures must not match */
-#define RGX_VAL_SIG_CHECK_ERR_EN (RGX_VAL_FBDC_SIG_CHECK_ERR_EN)
+#define RGX_VAL_WGP_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable WGP signature check. Signatures must match */
+#define RGX_VAL_WGP_SIG_CHECK_ERR_EN (0x20U) /*!< Enable WGP signature check. Signatures must not match */
+#define RGX_VAL_TRP_SIG_CHECK_NOERR_EN (0x40U) /*!< Enable TRP signature check. Signatures must match */
+#define RGX_VAL_TRP_SIG_CHECK_ERR_EN (0x80U) /*!< Enable TRP signature check. Signatures must not match */
typedef struct _GPU_FREQ_TRACKING_DATA_
{
IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */
IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */
IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+
+ IMG_UINT64 aaui64DMOSStatActive[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS active statistic */
+ IMG_UINT64 aaui64DMOSStatBlocked[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS blocked statistic */
+ IMG_UINT64 aaui64DMOSStatIdle[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS idle statistic */
+ IMG_UINT64 aaui64DMOSStatCumulative[RGXFWIF_DM_MAX][RGX_NUM_DRIVERS_SUPPORTED]; /* Per-DM per-OS sum of active/blocked/idle stats */
+
IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */
} RGXFWIF_GPU_UTIL_STATS;
IMG_UINT32 ui32Characteristic1;
IMG_UINT32 ui32Characteristic2;
} sTransfer;
+
+ struct
+ {
+ IMG_UINT32 ui32DispatchSize;
+ IMG_UINT32 ui32AccStructSize;
+ } sRay;
+
} RGX_WORKLOAD;
/*!
{
WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */
} sTransfer;
+
+ struct
+ {
+ WORKLOAD_MATCHING_DATA sDataRDM; /*!< matching data for RDM commands */
+ } sRay;
} uWorkloadMatchingData;
/*
/* Kernel mode linear address of device registers */
void __iomem *pvRegsBaseKM;
+ /* Kernel mode linear address of the host secure device registers
+  * (register bank at RGX_HOST_SECURE_REGBANK_OFFSET) */
+ void __iomem *pvSecureRegsBaseKM;
+
IMG_HANDLE hRegMapping;
/* System physical address of device registers */
DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */
IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ /* Counter dumping */
+ DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */
+ POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */
+#endif
+
PVRSRV_MEMALLOCFLAGS_T uiFWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */
IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */
RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */
DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */
- RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */
+ RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing km-firmware shared system data */
DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */
- RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */
+ RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing km-firmware shared os data */
#if defined(SUPPORT_TBI_INTERFACE)
DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */
DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc;
RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg;
- /* Additional guest firmware memory context info */
- DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED];
- DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED];
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ DEVMEM_MEMDESC *psRGXFWIfActiveContextBufDesc;
+ RGXFWIF_ACTIVE_CONTEXT_BUF_DATA *psRGXFWIfActiveContextBuf;
+#endif
+
+ /* Premapped firmware memory context info */
+ DEVMEM_HEAP *psPremappedFwRawHeap[RGX_NUM_DRIVERS_SUPPORTED];
+ DEVMEM_MEMDESC *psPremappedFwRawMemDesc[RGX_NUM_DRIVERS_SUPPORTED];
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
/* Array to store data needed for workload estimation when a workload
IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */
IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */
IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */
+ IMG_BOOL bHWPerfHasRun; /*! Flag to indicate that HWPerf has been enabled. Used by FTrace
+ to determine whether HWPerf was enabled outside of the FTrace module. */
IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */
POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */
RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */
- IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ PMR *psGenHeapSecMem; /*!< An allocation of secure memory mapped to
+ the general devmem heap. The allocation is
+ created and mapped at driver init. It's used for
+ various purposes. See rgx_fwif_km.h for all use cases. */
+#endif
+
+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH)
+ DEVMEM_MEMDESC *psRGXFWScratchBufMemDesc;
+#endif
} PVRSRV_RGXDEV_INFO;
#include "rgxmem.h"
#include "rgxmmudefs_km.h"
#include "rgxta3d.h"
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
#include "rgxkicksync.h"
+#endif
#include "rgxutils.h"
#include "rgxtimecorr.h"
#include "rgxfwimageutils.h"
#include "sync_checkpoint_external.h"
#include "tlstream.h"
#include "devicemem_server_utils.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "info_page.h"
#include "physmem_lma.h"
*/
#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP)
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000)
-#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (10800000)
#else
#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (30000)
-#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000)
+#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (600000)
#endif
/* Workload Estimation Firmware CCB length */
static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
{
- /* ensure memory is flushed before kicking MTS */
+ /* Ensure any uncached/WC memory writes are flushed from CPU write buffers
+ * before kicking MTS.
+ */
OSWriteMemoryBarrier(NULL);
+ /* This should *NOT* happen. Try to trace what caused this and avoid an NPE
+ * (NULL-pointer dereference) with the Write/Read at the foot of the function.
+ */
+ PVR_ASSERT((psDevInfo != NULL));
+ if (psDevInfo == NULL)
+ {
+ return;
+ }
+
+ /* Kick MTS to wake firmware. */
OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
- /* ensure the MTS kick goes through before continuing */
-#if !defined(NO_HARDWARE)
- OSWriteMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + RGX_CR_MTS_SCHEDULE);
-#else
- OSWriteMemoryBarrier(NULL);
-#endif
+ /* Uncached device/IO mapping will ensure MTS kick leaves CPU, read back
+ * will ensure it reaches the regbank via inter-connects (AXI, PCIe etc)
+ * before continuing.
+ */
+ (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE);
}
/*************************************************************************/ /*!
}
#endif
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*!
+ ******************************************************************************
+ @Function RGXFWSetupCounterBuffer
+ @Description Allocates a FW-shared buffer for counter dumping and stores its FW address and size (in dwords) in psCounterDumpCtl.
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+ *****************************************************************************/
+static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo,
+ DEVMEM_MEMDESC** ppsBufferMemDesc,
+ IMG_UINT32 ui32CounterDataBufferSize,
+ RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl,
+ const IMG_CHAR* pszBufferName)
+{
+ PVRSRV_ERROR eError;
+
+ eError = RGXSetupFwAllocation(psDevInfo,
+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
+ ui32CounterDataBufferSize,
+ "FwCounterBuffer",
+ ppsBufferMemDesc,
+ &psCounterDumpCtl->sBuffer,
+ NULL,
+ RFW_FWADDR_NOREF_FLAG);
+ PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
+
+ psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2;
+
+ return PVRSRV_OK;
+}
+#endif
+
/*!
*******************************************************************************
@Function RGXFWSetupAlignChecks
psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
{
- eError = RGXPollReg32(hPrivate,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
- if (eError == PVRSRV_OK)
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
{
- /* Issue a Write */
- CHECK_HWBRN_68777(ui32RegAddr);
- RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32RegAddr);
- (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
- RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32RegValue);
- (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError == PVRSRV_OK)
+ {
+ /* Issue a Write */
+ CHECK_HWBRN_68777(ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32RegAddr);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32RegValue);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */
+ }
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError == PVRSRV_OK)
+ {
+ /* Issue a Write. NOTE(review): this V1 path writes the address, data and fence reads all via MSLVCTRL1, whereas the GT1 path above (and the V1 read path) use MSLVCTRL0/MSLVDATAT — confirm these register names are intentional and not a copy-paste error. */
+ CHECK_HWBRN_68777(ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32RegAddr);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32RegValue);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */
+ }
}
}
else
psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
{
- eError = RGXPollReg32(hPrivate,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
- if (eError == PVRSRV_OK)
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
{
- /* Issue a Read */
- CHECK_HWBRN_68777(ui32RegAddr);
- RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES,
- ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN);
- (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
-
- /* Wait for Slave Port to be Ready */
eError = RGXPollReg32(hPrivate,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
- if (eError != PVRSRV_OK) return eError;
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError == PVRSRV_OK)
+ {
+ /* Issue a Read */
+ CHECK_HWBRN_68777(ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED,
+ ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError == PVRSRV_OK)
+ {
+ /* Issue a Read */
+ CHECK_HWBRN_68777(ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED,
+ ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN);
+ (void) RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
}
#if !defined(NO_HARDWARE)
- *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES);
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED);
+ }
+ else
+ {
+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED);
+ }
#else
*ui32RegValue = 0xFFFFFFFF;
#endif
IMG_UINT32 ui32AllocatedOffset,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
DEVMEM_MEMDESC *psContextStateMemDesc,
- IMG_UINT32 ui32CCBAllocSize,
- IMG_UINT32 ui32CCBMaxAllocSize,
+ IMG_UINT32 ui32CCBAllocSizeLog2,
+ IMG_UINT32 ui32CCBMaxAllocSizeLog2,
IMG_UINT32 ui32ContextFlags,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
IMG_UINT32 ui32FWCommonContextOffset;
IMG_UINT8 *pui8Ptr;
- IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
PVRSRV_ERROR eError;
/*
/* Allocate the client CCB */
eError = RGXCreateCCB(psDevInfo,
- ui32CCBAllocSize,
- ui32CCBMaxAllocSize,
+ ui32CCBAllocSizeLog2,
+ ui32CCBMaxAllocSizeLog2,
ui32ContextFlags,
psConnection,
eRGXCCBRequestor,
psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
psFWCommonContext->eDM = eDM;
+ BITMASK_SET(psFWCommonContext->ui32MiscFlags, RGXFWIF_CONTEXT_MISC_FLAGS_HAS_DEFER_COUNT);
/* Set the firmware CCB device addresses in the firmware common context */
eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
psInfo->psFWFrameworkMemDesc,
0, RFW_FWADDR_FLAG_NONE);
- PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwadd);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr);
}
else
{
/* Store a references to Server Common Context and PID for notifications back from the FW. */
psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM();
+ OSCachedMemCopy(psFWCommonContext->szProcName, psConnection->pszProcName, RGXFW_PROCESS_NAME_LEN);
/* Set the firmware GPU context state buffer */
psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ psDeviceNode->sDevId.ui32InternalID,
ui32FWAddr);
}
#endif
{
RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc);
}
-fail_fwframeworkfwadd:
+fail_fwframeworkfwaddr:
RGXUnsetFirmwareAddress(psFWMemContextMemDesc);
fail_fwmemctxfwaddr:
RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
return PVRSRV_ERROR_INVALID_PARAMS;
}
+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DLLIST_NODE *psNode)
+{
+ RGX_SERVER_COMMON_CONTEXT *psThisContext =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+ return FWCommonContextGetFWAddress(psThisContext);
+}
+
PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
IMG_UINT32 ui32ContextFlags)
{
OSFreeKMAppHintState(pvAppHintState);
pvAppHintState = NULL;
+ if (psTraceBufCtl->ui32TraceBufSizeInDWords < RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS ||
+ psTraceBufCtl->ui32TraceBufSizeInDWords > RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested trace buffer size (%u) out of its minimum (%u) & maximum (%u) range. Exiting error.",
+ __func__,
+ psTraceBufCtl->ui32TraceBufSizeInDWords,
+ RGXFW_TRACE_BUF_MIN_SIZE_IN_DWORDS,
+ RGXFW_TRACE_BUF_MAX_SIZE_IN_DWORDS));
+ eError = PVRSRV_ERROR_OUT_OF_RANGE;
+ goto exit_error;
+ }
+
uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32);
for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
fail:
RGXTraceBufferDeinit(psDevInfo);
+exit_error:
return eError;
}
-#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-/*************************************************************************/ /*!
-@Function RGXPowmonBufferIsInitRequired
-
-@Description Returns true if the power monitoring buffer is not allocated and
- might be required by the firmware soon. Powmon buffer allocated
- on-demand to reduce RAM footprint on systems not needing
- power monitoring.
-
-@Input psDevInfo RGX device info
-
-@Return IMG_BOOL Whether on-demand allocation(s) is/are needed
- or not
-*/ /**************************************************************************/
-INLINE IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- /* The firmware expects a power monitoring buffer only when:
- * - Single-shot power counters are enabled with RGX_HWPERF_PWR_EST_REQUEST
- * - the Driver Mode is not Guest
- */
- if ((psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL)
- && (psDevInfo->ui64HWPerfFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_EST_REQUEST))
- && !PVRSRV_VZ_MODE_IS(GUEST))
- {
- return IMG_TRUE;
- }
-
- return IMG_FALSE;
-}
-
-/*************************************************************************/ /*!
-@Function RGXPowmonBufferDeinit
-
-@Description Deinitialises all the allocations and references that are made
- for the FW power monitoring buffer
-
-@Input ppsDevInfo RGX device info
-@Return void
-*/ /**************************************************************************/
-static void RGXPowmonBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
-
- if (psDevInfo->psRGXFWIfPowMonBufferMemDesc)
- {
- if (psFwSysData->sPowerMonBuf.pui32TraceBuffer != NULL)
- {
- DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfPowMonBufferMemDesc);
- psFwSysData->sPowerMonBuf.pui32TraceBuffer = NULL;
- }
-
- DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfPowMonBufferMemDesc);
- psDevInfo->psRGXFWIfPowMonBufferMemDesc = NULL;
- }
-}
-
-/*************************************************************************/ /*!
-@Function RGXPowmonBufferInitOnDemandResources
-
-@Description Allocates the power monitoring buffer.
-
-@Input psDevInfo RGX device info
-
-@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
- PVRSRV_ERROR eError = PVRSRV_OK;
-
-#define POWER_MON_BUF_SIZE (8192UL)
- /* Ensure allocation API is only called when not already allocated */
- PVR_ASSERT(psDevInfo->psRGXFWIfPowMonBufferMemDesc == NULL);
-
- eError = RGXSetupFwAllocation(psDevInfo,
- RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
- POWER_MON_BUF_SIZE,
- "FwPowMonBuffer",
- &psDevInfo->psRGXFWIfPowMonBufferMemDesc,
- &psFwSysData->sPowerMonBuf.pui32RGXFWIfTraceBuffer,
- (void **)&psFwSysData->sPowerMonBuf.pui32TraceBuffer,
- RFW_FWADDR_NOREF_FLAG);
- PVR_LOG_GOTO_IF_ERROR(eError, "Power Monitoring Buffer allocation", fail);
-
- psFwSysData->ui32PowerMonBufSizeInDWords = POWER_MON_BUF_SIZE >> 2;
- OSWriteMemoryBarrier(psFwSysData->sPowerMonBuf.pui32TraceBuffer);
-
- return PVRSRV_OK;
-fail:
- RGXPowmonBufferDeinit(psDevInfo);
- return eError;
-}
-#endif
-
#if defined(PDUMP)
/*************************************************************************/ /*!
@Function RGXPDumpLoadFWInitData
#endif /* defined(SUPPORT_VALIDATION) */
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+#if defined(SUPPORT_VALIDATION)
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( SPU Clock Gating (Needs R+D Power Island) 0x%08x)", RGXFWIF_INICFG_SPU_CLOCK_GATE);
+#endif
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( Try overlapping DM pipelines: 0x%08x)", RGXFWIF_INICFG_TRY_OVERLAPPING_DM_PIPELINES);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST);
#if defined(SUPPORT_PDVFS)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
+ PDUMPCOMMENT(psDevInfo->psDeviceNode,
+ "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS);
#endif /* defined(SUPPORT_PDVFS) */
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
PDUMPCOMMENT(psDevInfo->psDeviceNode,
"( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK);
PDUMP_FLAGS_CONTINUOUS);
PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+ "( PID filter PID/DriverID list (Up to %u entries. Terminate with a zero PID))",
RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
{
IMG_UINT32 i;
const IMG_DEVMEM_OFFSET_T uiPIDOff
= (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID);
- const IMG_DEVMEM_OFFSET_T uiOSIDOff
- = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID);
+ const IMG_DEVMEM_OFFSET_T uiDriverIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32DriverID);
- PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and OSID pair %u)", i);
+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID and DriverID pair %u)", i);
PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)");
DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
0,
PDUMP_FLAGS_CONTINUOUS);
- PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)");
+ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(DriverID)");
DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc,
- uiOSIDOff,
+ uiDriverIDOff,
0,
PDUMP_FLAGS_CONTINUOUS);
}
IMG_UINT32 *pui32USRMNumRegions,
IMG_UINT64 *pui64UVBRMNumRegions,
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ IMG_BOOL bSPUClockGating,
FW_PERF_CONF eFirmwarePerf,
IMG_UINT32 ui32AvailablePowUnitsMask,
IMG_UINT32 ui32AvailableRACMask)
ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0;
}
+#if defined(SUPPORT_VALIDATION)
+ ui32ConfigFlags |= bSPUClockGating ? RGXFWIF_INICFG_SPU_CLOCK_GATE : 0;
+#else
+ PVR_UNREFERENCED_PARAMETER(bSPUClockGating);
+#endif
+
/* Make sure to inform firmware if the device supports fullace fabric coherency */
ui32ConfigFlags |= (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) &&
PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ?
RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED : 0;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
-#if defined(SUPPORT_PDVFS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
- IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST;
+#if defined(SUPPORT_PDVFS)
+ {
+ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
- /* Pro-active DVFS depends on Workload Estimation */
- psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
- psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
- PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
+ /* Pro-active DVFS depends on Workload Estimation */
+ psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo;
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table");
- if (psDVFSDeviceCfg->pasOPPTable != NULL)
- {
- if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+ if (psDVFSDeviceCfg->pasOPPTable != NULL)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: OPP Table too large: Size = %u, Maximum size = %lu",
- __func__,
- psDVFSDeviceCfg->ui32OPPTableSize,
- (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto fail;
- }
+ if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OPP Table too large: Size = %u, Maximum size = %lu",
+ __func__,
+ psDVFSDeviceCfg->ui32OPPTableSize,
+ (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues))));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail;
+ }
- OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
- psDVFSDeviceCfg->pasOPPTable,
- sizeof(psPDVFSOPPInfo->asOPPValues));
+ OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+ psDVFSDeviceCfg->pasOPPTable,
+ sizeof(psPDVFSOPPInfo->asOPPValues));
- psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+ psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
- ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS;
+ }
}
- }
#endif /* defined(SUPPORT_PDVFS) */
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
/* FW trace control structure */
psDevInfo->ui32ValidationFlags |= (bGPUStatePin) ? RGX_VAL_GPUSTATEPIN_EN : 0;
}
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ eError = RGXFWSetupCounterBuffer(psDevInfo,
+ &psDevInfo->psCounterBufferMemDesc,
+ OSGetPageSize(),
+ &psFwSysInitScratch->sCounterDumpCtl,
+ "CounterBuffer");
+ PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail);
+ PVR_DPF((PVR_DBG_WARNING, "Counter buffer allocated at %p, size %zu Bytes.", psDevInfo->psCounterBufferMemDesc, OSGetPageSize()));
+#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */
+
+#if defined(SUPPORT_VALIDATION)
{
IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
ui32ApphintDefault = 0;
case 0: /* no checking */ break;
case 1: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_NOERR_EN; break;
case 2: psDevInfo->ui32ValidationFlags |= RGX_VAL_FBDC_SIG_CHECK_ERR_EN; break;
- case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break;
- case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break;
+ case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_NOERR_EN; break;
+ case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_WGP_SIG_CHECK_ERR_EN; break;
+ case 5: psDevInfo->ui32ValidationFlags |= RGX_VAL_TRP_SIG_CHECK_NOERR_EN; break;
+ case 6: psDevInfo->ui32ValidationFlags |= RGX_VAL_TRP_SIG_CHECK_ERR_EN; break;
default:
PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
break;
&ui32ApphintDefault,
&psDevInfo->ui32PowDomainKickInterval);
-#if defined(SUPPORT_RAY_TRACING)
{
IMG_UINT64 ui64RCEDisableMask;
IMG_UINT64 ui64ApphintDefault = PVRSRV_APPHINT_RCEDISABLEMASK;
psFwSysInitScratch->ui64RCEDisableMask = ui64RCEDisableMask;
}
-#endif
+ {
+ #define PCG_PKT_DROP_THRESH_MAX (0x800U)
+ #define PCG_PKT_DROP_THRESH_MIN (0xBU)
+
+ IMG_UINT32 ui32PCGPktDropThresh;
+ IMG_UINT32 ui32ApphintDefault = PCG_PKT_DROP_THRESH_MIN;
+
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+ pvAppHintState,
+ PCGPktDropThresh,
+ &ui32ApphintDefault,
+ &ui32PCGPktDropThresh);
+
+ if ((ui32PCGPktDropThresh < PCG_PKT_DROP_THRESH_MIN) ||
+ (ui32PCGPktDropThresh > PCG_PKT_DROP_THRESH_MAX))
+ {
+ ui32PCGPktDropThresh = MAX(PCG_PKT_DROP_THRESH_MIN, MIN(ui32PCGPktDropThresh, PCG_PKT_DROP_THRESH_MAX));
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "Clamping value of PCGPktDropThresh apphint to %u (range is %u to %u)",
+ ui32PCGPktDropThresh, PCG_PKT_DROP_THRESH_MIN, PCG_PKT_DROP_THRESH_MAX));
+ }
+ psFwSysInitScratch->ui32PCGPktDropThresh = ui32PCGPktDropThresh;
+ }
+
+ {
+
+ IMG_UINT32 ui32RaySLCMMUAutoCacheOps;
+ IMG_UINT32 ui32ApphintDefault = PVRSRV_APPHINT_RAYSLCMMUAUTOCACHEOPS;
+
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
+ pvAppHintState,
+ RaySLCMMUAutoCacheOps,
+ &ui32ApphintDefault,
+ &ui32RaySLCMMUAutoCacheOps);
+ psFwSysInitScratch->ui32RaySLCMMUAutoCacheOps = ui32RaySLCMMUAutoCacheOps;
+ }
#endif /* defined(SUPPORT_VALIDATION) */
#if defined(SUPPORT_FIRMWARE_GCOV)
PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) |
PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
- PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
PVRSRV_MEMALLOCFLAG_GPU_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
PVRSRV_MEMALLOCFLAG_CPU_READABLE |
PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
#endif /* defined(SUPPORT_TBI_INTERFACE) */
- /* Allocate shared buffer for GPU utilisation */
+ /* Allocate shared buffer for GPU utilisation.
+ * Enable FIRMWARE_CACHED to reduce read latency in the FW.
+ * The FW flushes the cache after any writes.
+ */
eError = RGXSetupFwAllocation(psDevInfo,
- RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
+ (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) &
RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
sizeof(RGXFWIF_GPU_UTIL_FWCB),
"FwGPUUtilisationBuffer",
PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail);
#endif
+#if defined(SUPPORT_SECURE_CONTEXT_SWITCH)
+ eError = RGXSetupFwAllocation(psDevInfo,
+ (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)) &
+ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
+ RGXFW_SCRATCH_BUF_SIZE,
+ "FwScratchBuf",
+ &psDevInfo->psRGXFWScratchBufMemDesc,
+ &psFwSysInitScratch->pbFwScratchBuf,
+ NULL,
+ RFW_FWADDR_NOREF_FLAG);
+ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware scratch buffer allocation", fail);
+#endif
+
psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB);
/* Second stage initialisation or HWPerf, hHWPerfLock created in first
* stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */
* enabled during PDump playback via RTCONF at any point of time. */
eError = RGXHWPerfInitOnDemandResources(psDevInfo);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
-#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
- if (RGXPowmonBufferIsInitRequired(psDevInfo))
- {
- /* Allocate power monitoring log buffer if enabled */
- eError = RGXPowmonBufferInitOnDemandResources(psDevInfo);
- PVR_LOG_GOTO_IF_ERROR(eError, "RGXPowmonBufferInitOnDemandResources", fail);
- }
-#endif
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+ {
+ /* Align the hwperf allocation to the DMA block size to ensure
+ * the shared control structure buffer isn't overflowed when
+ * Meta DMA writes to it. */
+ ui32HWPerfCountersDataSize = PVR_ALIGN(ui32HWPerfCountersDataSize, RGX_META_DMA_BLOCK_SIZE);
}
eError = RGXSetupFwAllocation(psDevInfo,
- RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
+ RGX_FWCOMCTX_ALLOCFLAGS &
RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
ui32HWPerfCountersDataSize,
"FwHWPerfControlStructure",
RFW_FWADDR_FLAG_NONE);
PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+ {
+ RGXSetMetaDMAAddress(&psDevInfo->psRGXFWIfRuntimeCfg->sHWPerfCtlDMABuf,
+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ &psFwSysInitScratch->sHWPerfCtl, 0);
+ }
+
psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN)
? IMG_FALSE : IMG_TRUE;
#if defined(SUPPORT_SECURITY_VALIDATION)
{
PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS;
- PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags);
+ PVRSRV_SET_PHYS_HEAP_HINT(FW_PRIV_DATA, uiFlags);
PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test");
eError = DevmemFwAllocateExportable(psDeviceNode,
if (!psDeviceNode->bAutoVzFwIsUp)
{
- IMG_UINT32 ui32OSIndex;
+ IMG_UINT32 ui32DriverID;
RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
if (PVRSRV_VZ_MODE_IS(NATIVE))
{
- psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0;
+ psRuntimeCfg->aui32DriverPriority[RGXFW_HOST_DRIVER_ID] = 0;
+ psRuntimeCfg->aui32DriverIsolationGroup[RGXFW_HOST_DRIVER_ID] = RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP;
}
else
{
- for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++)
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
{
- const IMG_INT32 ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] =
- {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY,
- RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY};
+ const IMG_INT32 ai32DefaultPriorities[RGXFW_MAX_NUM_OSIDS] =
+ {RGX_DRIVERID_0_DEFAULT_PRIORITY,
+#if (RGXFW_MAX_NUM_OSIDS > 1)
+ RGX_DRIVERID_1_DEFAULT_PRIORITY,
+#if (RGXFW_MAX_NUM_OSIDS > 2)
+ RGX_DRIVERID_2_DEFAULT_PRIORITY,
+ RGX_DRIVERID_3_DEFAULT_PRIORITY,
+ RGX_DRIVERID_4_DEFAULT_PRIORITY,
+ RGX_DRIVERID_5_DEFAULT_PRIORITY,
+ RGX_DRIVERID_6_DEFAULT_PRIORITY,
+ RGX_DRIVERID_7_DEFAULT_PRIORITY
+#if (RGXFW_MAX_NUM_OSIDS > 8)
+#error "Support for more than 8 OSIDs not implemented."
+#endif
+#endif
+#endif
+ };
+
+ const IMG_UINT32 aui32DefaultIsolationGroups[RGXFW_MAX_NUM_OSIDS] =
+ {RGX_DRIVERID_0_DEFAULT_ISOLATION_GROUP,
+#if (RGXFW_MAX_NUM_OSIDS > 1)
+ RGX_DRIVERID_1_DEFAULT_ISOLATION_GROUP,
+#if (RGXFW_MAX_NUM_OSIDS > 2)
+ RGX_DRIVERID_2_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_3_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_4_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_5_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_6_DEFAULT_ISOLATION_GROUP,
+ RGX_DRIVERID_7_DEFAULT_ISOLATION_GROUP,
+#endif
+#endif
+ };
/* Set up initial priorities between different OSes */
- psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex];
+ psRuntimeCfg->aui32DriverPriority[ui32DriverID] = (IMG_UINT32)ai32DefaultPriorities[ui32DriverID];
+ psRuntimeCfg->aui32DriverIsolationGroup[ui32DriverID] = aui32DefaultIsolationGroups[ui32DriverID];
}
}
#endif
/* Initialise GPU utilisation buffer */
- psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
- RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
+ {
+ IMG_UINT64 ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(), RGXFWIF_GPU_UTIL_STATE_IDLE);
+ RGXFWIF_DM eDM;
+
+ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64GpuLastWord = ui64LastWord;
+
+ for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++)
+ {
+ IMG_UINT32 ui32DriverID;
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psDevInfo->psRGXFWIfGpuUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID] = ui64LastWord;
+ }
+ }
+ }
/* init HWPERF data */
psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0;
sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Set up Workload Estimation firmware CCB */
- eError = RGXSetupCCB(psDevInfo,
- &psDevInfo->psWorkEstFirmwareCCBCtl,
- &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
- &psDevInfo->psWorkEstFirmwareCCB,
- &psDevInfo->psWorkEstFirmwareCCBMemDesc,
- &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
- &sFwOsInitScratch.psWorkEstFirmwareCCB,
- RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
- sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
- RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
- "FwWEstCCB");
- PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Set up Workload Estimation firmware CCB */
+ eError = RGXSetupCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+ &sFwOsInitScratch.psWorkEstFirmwareCCBCtl,
+ &sFwOsInitScratch.psWorkEstFirmwareCCB,
+ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS,
+ "FwWEstCCB");
+ PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail);
+ }
#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */
/* Initialise the compatibility check data */
IMG_UINT32 *pui32USRMNumRegions,
IMG_UINT64 *pui64UVBRMNumRegions,
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ IMG_BOOL bSPUClockGating,
FW_PERF_CONF eFirmwarePerf,
IMG_UINT32 ui32KCCBSizeLog2,
IMG_UINT32 ui32AvailablePowUnitsMask,
pui32USRMNumRegions,
pui64UVBRMNumRegions,
eRGXRDPowerIslandConf,
+ bSPUClockGating,
eFirmwarePerf,
ui32AvailablePowUnitsMask,
ui32AvailableRACMask);
#endif
#endif
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ if (psDevInfo->psCounterBufferMemDesc)
+ {
+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc);
+ psDevInfo->psCounterBufferMemDesc = NULL;
+ }
+#endif
+
#if defined(SUPPORT_FIRMWARE_GCOV)
if (psDevInfo->psFirmwareGcovBufferMemDesc)
{
{
if (psDevInfo->psRGXFWIfTraceBufCtl != NULL)
{
- /* first deinit/free the tracebuffer allocation */
+ /* deinit/free the tracebuffer allocation */
RGXTraceBufferDeinit(psDevInfo);
-#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
- /* second free the powmon log buffer if used */
- RGXPowmonBufferDeinit(psDevInfo);
-#endif
-
DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
psDevInfo->psRGXFWIfTraceBufCtl = NULL;
}
psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
}
#endif
+
+#if defined(SUPPORT_FW_HOST_SIDE_RECOVERY)
+ if (psDevInfo->psRGXFWIfActiveContextBufDesc)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfActiveContextBufDesc);
+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfActiveContextBufDesc);
+ psDevInfo->psRGXFWIfActiveContextBufDesc = NULL;
+ }
+#endif
}
/*!
&psDevInfo->psFirmwareCCBMemDesc);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFreeCCB(psDevInfo,
- &psDevInfo->psWorkEstFirmwareCCBCtl,
- &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
- &psDevInfo->psWorkEstFirmwareCCB,
- &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ RGXFreeCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+ }
#endif
if (psDevInfo->psPowSyncPrim != NULL)
{
return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA);
}
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ case RGXFWIF_KCCB_CMD_COUNTER_DUMP:
+ {
+ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA);
+ }
+#endif
case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG:
{
return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL);
{
return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA);
}
- case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE:
+ case RGXFWIF_KCCB_CMD_VZ_DRV_ARRAY_CHANGE:
case RGXFWIF_KCCB_CMD_WDG_CFG:
case RGXFWIF_KCCB_CMD_HEALTH_CHECK:
case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL:
IMG_BOOL bGPUHasWorkWaiting;
bGPUHasWorkWaiting =
- (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
if (!bGPUHasWorkWaiting)
{
goto _PVRSRVSetDevicePowerStateKM_Exit;
}
- eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate);
- if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
-
- eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot);
- if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
-
-_PVRSRVSetDevicePowerStateKM_Exit:
- PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
-
#if defined(SUPPORT_VALIDATION)
/**
* For validation, force the core to different powered units between
* DM kicks. PVRSRVDeviceGPUUnitsPowerChange acquires the power lock, hence
* ensure that this is done after the power lock is released.
*/
+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
if ((eError == PVRSRV_OK) && (eKCCBType != RGXFWIF_DM_GP))
{
IMG_BOOL bInsertPowerDomainTransition =
((ui32PowerDomainState & ~(psDevInfo->ui32AvailablePowUnitsMask)) == 0);
} while (!bIsValid);
+ PVR_DPF((PVR_DBG_MESSAGE, "Request GPU power units mask change to 0x%x", ui32PowerDomainState));
eError = PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32PowerDomainState);
if (eError != PVRSRV_OK)
goto RGXScheduleCommand_exit;
}
}
+
+ /* Re-acquire the power lock. */
+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+ if (unlikely(eError != PVRSRV_OK))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to re-acquire powerlock after GPU power units command (%s)",
+ __func__, PVRSRVGetErrorString(eError)));
+ goto RGXScheduleCommand_exit;
+ }
#endif
+ eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate);
+ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot);
+ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+_PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
RGXScheduleCommand_exit:
return eError;
}
RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) ||
(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) &&
KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)),
{
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
{
- PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,i32AdjustmentValue,0,0,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
{
- PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,i32AdjustmentValue,0,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,i32AdjustmentValue,0,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,i32AdjustmentValue,0,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,i32AdjustmentValue,0,pidTmp);
break;
}
case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES:
{
- PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp);
+ PVRSRVStatsUpdateRenderContextStats(psDevInfo->psDeviceNode,0,0,0,0,0,i32AdjustmentValue,pidTmp);
break;
}
}
if (NULL == psFwSysData)
{
PVR_DPF((PVR_DBG_ERROR,
- "%s: Fw Sys Config is not mapped into CPU space",
- __func__));
+ "%s: Fw Sys Config is not mapped into CPU space", __func__));
return PVRSRV_ERROR_INVALID_CPU_ADDR;
}
PDUMP_FLAGS_CONTINUOUS);
}
+/*!
+*******************************************************************************
+ @Function      RGXFWInjectFault
+
+ @Description   Deliberately crash the firmware for fault-injection testing:
+                repoints catbase-0 (the FW MMU context) at an unmapped
+                address so the firmware faults from its own memory context.
+                Register offsets differ depending on the HOST_SECURITY_VERSION
+                feature value, hence the two branches below.
+
+ @Input         psDevInfo - RGX device info
+
+ @Return        PVRSRV_OK on success; PVRSRV_ERROR_NOT_SUPPORTED when running
+                as a virtualisation Guest (Guests must not touch FW MMU regs).
+******************************************************************************/
+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+	IMG_UINT32 ui32CBaseMapCtxReg;
+
+	/* Guest drivers have no access to the FW MMU control registers */
+	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
+
+	if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+	{
+		/* Security version > 1: registers live at the re-located
+		 * (host-secure) offsets */
+		ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4;
+		/* Set the mapping context */
+		RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV);
+		(void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */
+
+		/*
+		 * Catbase-0 (FW MMU context) pointing to unmapped mem to make
+		 * FW crash from its memory context
+		 */
+		RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams,
+		                      RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1,
+		                      RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT,
+		                      RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT,
+		                      0xDEADBEEF);
+	}
+	else
+	{
+		/* Legacy (security version <= 1) register layout */
+		ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT;
+		/* Set the mapping context */
+		RGXWriteReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV);
+		(void)RGXReadReg32(&psDevInfo->sLayerParams, ui32CBaseMapCtxReg); /* Fence write */
+
+		/*
+		 * Catbase-0 (FW MMU context) pointing to unmapped mem to make
+		 * FW crash from its memory context
+		 */
+		RGXWriteKernelMMUPC32(&psDevInfo->sLayerParams,
+		                      RGX_CR_MMU_CBASE_MAPPING,
+		                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+		                      RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+		                      0xDEADBEEF);
+	}
+
+	return PVRSRV_OK;
+}
+
PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32DriverID,
RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
{
PVRSRV_ERROR eError = PVRSRV_OK;
const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
- sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32DriverID = ui32DriverID;
sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
#if defined(SUPPORT_AUTOVZ)
/* Guests and Host going offline should wait for confirmation
* from the Firmware of the state change. If this fails, break
* the connection on the OS Driver's end as backup. */
- if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS))
+ if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32DriverID == RGXFW_HOST_DRIVER_ID))
{
LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2)
{
else if (psFwSysData)
{
const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags =
- (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid];
+ (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32DriverID];
/* Attempt several times until the FW manages to offload the OS */
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
/* read the OS state */
OSMemoryBarrier(NULL);
- /* check if FW finished offloading the OSID and is stopped */
+ /* check if FW finished offloading the driver and is stopped */
if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE)
{
eError = PVRSRV_OK;
return eError;
}
-PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32Priority)
-{
- PVRSRV_ERROR eError;
- RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 };
-
- PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
-
- sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
- psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority;
- OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]);
-
-#if defined(PDUMP)
- PDUMPCOMMENT(psDevInfo->psDeviceNode,
- "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid);
- DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
- offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)),
- ui32Priority ,
- PDUMP_FLAGS_CONTINUOUS);
-#endif
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError = RGXScheduleCommand(psDevInfo,
- RGXFWIF_DM_GP,
- &sOSidPriorityCmd,
- PDUMP_FLAGS_CONTINUOUS);
- if (eError != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- return eError;
-}
-
PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
CONNECTION_DATA *psConnection,
PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGXFWIF_DM eDM)
{
IMG_UINT32 ui32CmdSize;
RGXFWIF_CCB_CMD_HEADER *psCmdHeader;
RGXFWIF_CMD_PRIORITY *psCmd;
PVRSRV_ERROR eError;
- IMG_INT32 i32Priority = (IMG_INT32)ui32Priority;
RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext);
eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor);
sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
{
"%s: Failed to submit set priority command with error (%u)",
__func__,
eError));
+ goto fail_cmdacquire;
}
psContext->i32Priority = i32Priority;
fail_ccbacquire:
fail_checkpriority:
+fail_cmdacquire:
PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
-
+#if defined(SUPPORT_RGXKICKSYNC_BRIDGE)
ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
-
+#endif
/* If at least one DM stalled bit is different than before */
if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
{
_RGXUpdateHealthStatus_Exit:
OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
- RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason);
+ RGXSRV_HWPERF_DEVICE_INFO_HEALTH(psDevInfo, eNewStatus, eNewReason);
/*
* Attempt to service the HWPerf buffer to regularly transport idle/periodic
}
else
{
- /* Otherwise, only dump first stalled command in the CCB */
- DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr,
+ /* Otherwise, only dump first command in the CCB */
+ DumpFirstCCBCmd(psCurrentServerCommonContext->sFWCommonContextFWAddr,
psCurrentServerCommonContext->psClientCCB,
pfnDumpDebugPrintf,
pvDumpDebugFile);
@Function RGXFwRawHeapAllocMap
- @Description Register firmware heap for the specified guest OSID
+ @Description Register firmware heap for the specified driver
@Input psDeviceNode - device node
- @Input ui32OSID - Guest OSID
+ @Input ui32DriverID - Guest driver
@Input sDevPAddr - Heap address
@Input ui64DevPSize - Heap size
******************************************************************************/
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DriverID,
IMG_DEV_PHYADDR sDevPAddr,
IMG_UINT64 ui64DevPSize)
{
IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH];
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS |
- PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID));
- PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32DriverID));
+ PHYS_HEAP_CONFIG *psFwHeapConfig = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
PHYS_HEAP_CONFIG sFwHeapConfig;
- PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK);
+ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
- if (psFwMainConfig == NULL)
+ if (psFwHeapConfig == NULL)
{
PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found."));
return PVRSRV_ERROR_NOT_SUPPORTED;
}
- OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+ OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
if (!ui64DevPSize ||
!sDevPAddr.uiAddr ||
- ui32OSID >= RGX_NUM_OS_SUPPORTED ||
+ ui32DriverID >= RGX_NUM_DRIVERS_SUPPORTED ||
ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
{
PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName));
return PVRSRV_ERROR_INVALID_PARAMS;
}
- sFwHeapConfig = *psFwMainConfig;
+ sFwHeapConfig = *psFwHeapConfig;
sFwHeapConfig.sStartAddr.uiAddr = 0;
sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr;
sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+ sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP;
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
- PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID);
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwHeapConfig.ui32UsageFlags),
+ &sFwHeapConfig,
+ szRegionRAName,
+ &psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32DriverID);
- eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]);
- PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID);
+ eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32DriverID]);
+ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32DriverID);
- psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID];
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID] = psDeviceNode->apsFWPremapPhysHeap[ui32DriverID];
- PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID);
+ PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for DriverID: [%d]", ui32DriverID);
-#if (RGX_NUM_OS_SUPPORTED > 1)
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
/* don't clear the heap of other guests on allocation */
- uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
+ uiRawFwHeapAllocFlags &= (ui32DriverID > RGXFW_HOST_DRIVER_ID) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL);
#endif
/* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */
if (psDeviceNode->bAutoVzFwIsUp)
{
uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE);
}
eError = DevmemFwAllocate(psDevInfo,
RGX_FIRMWARE_RAW_HEAP_SIZE,
uiRawFwHeapAllocFlags,
- psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName,
- &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+ psDevInfo->psPremappedFwRawHeap[ui32DriverID]->pszName,
+ &psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]);
PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate");
/* Mark this devmem heap as premapped so allocations will not require device mapping. */
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_TRUE);
- if (ui32OSID == RGXFW_HOST_OS)
+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID)
{
/* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly
* No memory allocated from these sub-heaps will be individually mapped into the device's
@Function RGXFwRawHeapUnmapFree
- @Description Unregister firmware heap for the specified guest OSID
+ @Description Unregister firmware heap for the specified guest driver
@Input psDeviceNode - device node
- @Input ui32OSID - Guest OSID
+ @Input ui32DriverID
******************************************************************************/
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID)
+ IMG_UINT32 ui32DriverID)
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
/* remove the premap status, so the heap can be unmapped and freed */
- if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID])
+ if (psDevInfo->psPremappedFwRawHeap[ui32DriverID])
{
- DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE);
+ DevmemHeapSetPremapStatus(psDevInfo->psPremappedFwRawHeap[ui32DriverID], IMG_FALSE);
}
- if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+ if (psDevInfo->psPremappedFwRawMemDesc[ui32DriverID])
{
- DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
- psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psPremappedFwRawMemDesc[ui32DriverID]);
+ psDevInfo->psPremappedFwRawMemDesc[ui32DriverID] = NULL;
}
}
PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo)
{
#if defined(NO_HARDWARE) && defined(PDUMP)
- PVR_UNREFERENCED_PARAMETER(psDevInfo);
-
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW");
PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo)
{
#if defined(NO_HARDWARE) && defined(PDUMP)
- PVR_UNREFERENCED_PARAMETER(psDevInfo);
-
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode,
PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW");
*/
static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
{
- IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+ void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM;
+ IMG_UINT8 __iomem *pui8RegBase = pvRegBase;
IMG_UINT32 ui32PollValue;
IMG_UINT32 ui32PollMask;
IMG_UINT32 ui32PollRegOffset;
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
{
- ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN;
- ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN;
- ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES;
- ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES;
- ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES__RD_EN;
- CHECK_HWBRN_68777(ui32WriteValue);
- ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__META_REGISTER_UNPACKED_ACCESSES;
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN;
+ ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN;
+ ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED;
+ ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED;
+ ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__RD_EN;
+ CHECK_HWBRN_68777(ui32WriteValue);
+ ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_GT1_AND_METAREG_UNPACKED;
+ }
+ else
+ {
+ ui32PollValue = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN;
+ ui32PollMask = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN;
+ ui32PollRegOffset = RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED;
+ ui32WriteOffset = RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED;
+ ui32WriteValue = ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED__RD_EN;
+ CHECK_HWBRN_68777(ui32WriteValue);
+ ui32ReadOffset = RGX_CR_META_SP_MSLVDATAX__HOST_SECURITY_V1_AND_METAREG_UNPACKED;
+ }
}
else
{
}
/* Issue the Read */
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset, ui32WriteValue);
- (void)OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32WriteOffset);
+ OSWriteUncheckedHWReg32(pvRegBase, ui32WriteOffset, ui32WriteValue);
+ (void)OSReadUncheckedHWReg32(pvRegBase, ui32WriteOffset);
/* Wait for Slave Port to be Ready: read complete */
if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
}
/* Read the value */
- *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32ReadOffset);
+ *pui32Value = OSReadUncheckedHWReg32(pvRegBase, ui32ReadOffset);
return PVRSRV_OK;
}
*/
static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
{
- IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+ void __iomem *pvRegBase = psDevInfo->pvSecureRegsBaseKM;
+ IMG_UINT8 __iomem *pui8RegBase = pvRegBase;
+
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_REGISTER_UNPACKED_ACCESSES))
{
- /* Wait for Slave Port to be Ready */
- if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
- (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES),
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
{
- return PVRSRV_ERROR_TIMEOUT;
+ /* Wait for Slave Port to be Ready */
+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED),
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Issue the Write */
+ CHECK_HWBRN_68777(ui32METAAddr);
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32METAAddr);
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, ui32Value);
}
+ else
+ {
+ /* Wait for Slave Port to be Ready */
+ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode,
+ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED),
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ POLL_FLAG_LOG_ERROR) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
- /* Issue the Write */
- CHECK_HWBRN_68777(ui32METAAddr);
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0__META_REGISTER_UNPACKED_ACCESSES, ui32METAAddr);
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT__META_REGISTER_UNPACKED_ACCESSES, ui32Value);
+ /* Issue the Write */
+ CHECK_HWBRN_68777(ui32METAAddr);
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32METAAddr);
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT__HOST_SECURITY_V1_AND_METAREG_UNPACKED, ui32Value);
+ }
}
else
{
}
/* Issue the Write */
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
- (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
- (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT); /* Fence write */
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+ (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */
+ OSWriteUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+ (void) OSReadUncheckedHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAT); /* Fence write */
}
return PVRSRV_OK;
MMU_FAULT_DATA sFaultData = {0U};
MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx;
IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX);
- IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
- IMG_UINT32 ui32OSID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
+ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ IMG_UINT32 ui32DriverID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE;
IMG_UINT32 ui32HeapId;
PHYS_HEAP *psPhysHeap;
IMG_UINT64 ui64FwDataBaseMask;
IMG_UINT32 ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT);
IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1));
- PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED),
+ PVR_LOG_GOTO_IF_INVALID_PARAM((ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED),
eError, ErrorExit);
PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) ||
(ui32FwVA < ui32FwHeapEnd)),
eError, ErrorExit);
- ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ?
- PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID);
+ ui32HeapId = (ui32DriverID == RGXFW_HOST_DRIVER_ID) ?
+ PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32DriverID);
psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId];
if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
sDevPA.uiAddr = ui32PageOffset + (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK);
/* Only the Host's Firmware heap is present in the Host's CPU IPA space */
- if (ui32OSID == RGXFW_HOST_OS)
+ if (ui32DriverID == RGXFW_HOST_DRIVER_ID)
{
PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA);
}
ErrorExit:
return eError;
}
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+*******************************************************************************
+@Function RGXIsValidWorkloadEstCCBCommand
+
+@Description Checks if command type can be used for workload estimation
+
+@Input eType Command type to check
+
+
+@Return IMG_BOOL
+******************************************************************************/
+/* Returns IMG_TRUE only for the kick command types listed below; any other
+ * command type is rejected (and asserted on in builds with PVRSRV_NEED_PVR_ASSERT). */
+INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType)
+{
+ switch (eType)
+ {
+ /* Command types that carry workload-estimation data. */
+ case RGXFWIF_CCB_CMD_TYPE_GEOM:
+ case RGXFWIF_CCB_CMD_TYPE_3D:
+ case RGXFWIF_CCB_CMD_TYPE_CDM:
+ case RGXFWIF_CCB_CMD_TYPE_RAY:
+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM:
+ return IMG_TRUE;
+ default:
+ /* Callers are expected never to pass other command types;
+ * assert in debug builds, fail safe in release. */
+ PVR_ASSERT(IMG_FALSE);
+ return IMG_FALSE;
+ }
+}
+#endif
+
/******************************************************************************
End of file (rgxfwutils.c)
******************************************************************************/
#include "devicemem_utils.h"
#include "rgxmem.h"
-#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawDriverID%d" /*!< RGX Raw Firmware Heap identifier */
static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
PVRSRV_MEMALLOCFLAGS_T *puiFlags,
DEVMEM_HEAP **ppsFwHeap)
{
- PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)(PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags);
+ PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags);
PVRSRV_ERROR eError = PVRSRV_OK;
switch (ePhysHeap)
{
-#if defined(SUPPORT_SECURITY_VALIDATION)
- /* call with GPU_SECURE from RGXSetupFwSysData */
- case PVRSRV_PHYS_HEAP_GPU_SECURE:
-#endif
case PVRSRV_PHYS_HEAP_FW_CODE:
case PVRSRV_PHYS_HEAP_FW_PRIV_DATA:
case PVRSRV_PHYS_HEAP_FW_MAIN:
case PVRSRV_PHYS_HEAP_FW_PREMAP6:
case PVRSRV_PHYS_HEAP_FW_PREMAP7:
{
- IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
+ IMG_UINT32 ui32DriverID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
- PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
- *ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED, "ui32DriverID");
+ *ppsFwHeap = psDevInfo->psPremappedFwRawHeap[ui32DriverID];
break;
}
default:
PVR_DPF_RETURN_RC(eError);
}
- uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ?
- (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) :
- (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)));
+ if (psFwHeap == psDevInfo->psFirmwareConfigHeap)
+ {
+ /*
+ * All structures allocated from the Firmware Config subheap must start at the same pre-determined
+ * offsets, regardless of the system's page size (e.g. 4k,16k,64k). The alignment requirement is
+ * satisfied for virtual addresses during the mapping stage. Physical allocations do not take
+ * alignment into consideration.
+ * VZ drivers usually preallocate and premap the entire Firmware heap range. Any allocations from
+ * this heap are physical alloc only, having their device VAs derived from their PAs. This makes
+ * it impossible to fulfil alignment requirements.
+ * To work around this limitation, allocation sizes are rounded to the nearest multiple of 64kb,
+ * regardless of the actual size of object.
+ */
+ uiAlign = RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY;
+
+ uiSize = PVR_ALIGN(uiSize, RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY);
+ }
+ else
+ {
+ uiAlign = (GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)));
+ }
eError = DevmemAllocateAndMap(psFwHeap,
uiSize,
is set
*/
eError = DevmemMapToDevice(*ppsMemDescPtr,
- psDevInfo->psFirmwareMainHeap,
+ psFwHeap,
&sTmpDevVAddr);
if (eError != PVRSRV_OK)
{
static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
eError = DevmemAllocateSparse(psDevInfo->psDeviceNode,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
-#if defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
-IMG_BOOL RGXPowmonBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
-PVRSRV_ERROR RGXPowmonBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
-#endif
-
#if defined(SUPPORT_TBI_INTERFACE)
IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
IMG_UINT32 *pui32USRMNumRegions,
IMG_UINT64 *pui64UVBRMNumRegions,
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ IMG_BOOL bSPUClockGating,
FW_PERF_CONF eFirmwarePerf,
IMG_UINT32 ui32KCCBSizeLog2,
IMG_UINT32 ui32AvailableSPUMask,
@Input ui32CCBAllocSizeLog2 Size of the CCB for this context
@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context
@Input ui32ContextFlags Flags which specify properties of the context
-@Input ui32Priority Priority of the context
+@Input i32Priority Priority of the context
@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run
@Input ui64RobustnessAddress Address for FW to signal a context reset
@Input psInfo Structure that contains extra info
IMG_UINT32 ui32CCBAllocSizeLog2,
IMG_UINT32 ui32CCBMaxAllocSizeLog2,
IMG_UINT32 ui32ContextFlags,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
SERVER_MMU_CONTEXT *psServerMMUContext,
PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr);
+PRGXFWIF_FWCOMMONCONTEXT RGXGetFWCommonContextAddrFromServerCommonCtx(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DLLIST_NODE *psNode);
+
PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
IMG_UINT32 ui32ContextFlags);
PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
CONNECTION_DATA *psConnection,
PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
RGXFWIF_DM eDM);
/*!
PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
IMG_UINT32 ui32HCSDeadlineMs);
-/*!
-*******************************************************************************
-@Function RGXFWChangeOSidPriority
-
-@Description Requests the Firmware to change the priority of an operating
- system. Higher priority number equals higher priority on the
- scheduling system.
-
-@Input psDevInfo pointer to device info
-@Input ui32OSid The OSid whose priority is to be altered
-@Input ui32Priority The new priority number for the specified OSid
-
-@Return PVRSRV_ERROR
-******************************************************************************/
-PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
- IMG_UINT32 ui32Priority);
-
/*!
*******************************************************************************
@Function RGXFWHealthCheckCmd
@Description Requests the Firmware to change the guest OS Online states.
This should be initiated by the VMM when a guest VM comes
online or goes offline. If offline, the FW offloads any current
- resource from that OSID. The request is repeated until the FW
- has had time to free all the resources or has waited for
+ resource from that DriverID. The request is repeated until the
+ FW has had time to free all the resources or has waited for
workloads to finish.
@Input psDevInfo pointer to device info
-@Input ui32OSid The Guest OSid whose state is being altered
+@Input ui32DriverID The driver whose state is being altered
@Input eOSOnlineState The new state (Online or Offline)
@Return PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo,
- IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32DriverID,
RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
#if defined(SUPPORT_AUTOVZ)
PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
IMG_BOOL bCheckAfterTimePassed);
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
#if defined(SUPPORT_AUTOVZ)
/*!
*******************************************************************************
void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode);
#endif /* SUPPORT_AUTOVZ */
-PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
-
void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
void *pvDumpDebugFile,
Otherwise, a PVRSRV error code
******************************************************************************/
PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DriverID,
IMG_DEV_PHYADDR sDevPAddr,
IMG_UINT64 ui64DevPSize);
******************************************************************************/
void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32OSID);
+ IMG_UINT32 ui32DriverID);
/*!
*******************************************************************************
IMG_DEV_PHYADDR *psDevPA,
IMG_UINT64 *pui64RawPTE);
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*!
+*******************************************************************************
+@Function RGXIsValidWorkloadEstCCBCommand
+
+@Description Checks if command type can be used for workload estimation
+
+@Input eType Command type to check
+
+@Return IMG_BOOL
+******************************************************************************/
+INLINE IMG_BOOL RGXIsValidWorkloadEstCCBCommand(RGXFWIF_CCB_CMD_TYPE eType);
+
+#endif
+
+/*!
+*******************************************************************************
+@Function RGXFWInjectFault
+
+@Description Injecting firmware fault to validate recovery through Host
+
+@Input psDevInfo Pointer to device info
+
+@Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXFWInjectFault(PVRSRV_RGXDEV_INFO *psDevInfo);
+
#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ)
#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers."
#endif
#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val)
#endif /* defined(SUPPORT_AUTOVZ) */
-#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)))
+#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED == 1)))
/* native, static-vz and AutoVz using shared memory */
#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState)
#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState)
#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE)
#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE)
#define KM_SET_OS_CONNECTION(val, psDevInfo)
-#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED == 1) */
#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
-#if defined(SUPPORT_AUTOVZ)
-#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS
+#if defined(RGX_PREMAP_FW_HEAPS)
+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_HOST_DRIVER_ID
#else
-#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START
+#define RGX_FIRST_RAW_HEAP_DRIVER_ID RGXFW_GUEST_DRIVER_ID_START
#endif
#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val)
}
#ifdef SUPPORT_WORKLOAD_ESTIMATION
- /* Not a part of BVNC feature line and so doesn't need the feature supported check */
- psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Not a part of BVNC feature line and so doesn't need the feature supported check */
+ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+ }
#endif
/* Define the HW counter block counts. */
OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks));
}
+ /* The GPU core count is overwritten by the FW */
+ psBVNC->ui16BvncGPUCores = 0;
+
return PVRSRV_OK;
}
return PVRSRV_OK;
}
+/* Look up the HWPerf counter configuration for one counter block.
+ *
+ * psDevNode             - device node; used to take/release the power lock
+ * psHWPerfCtl           - FW HWPerf control structure holding per-block configs
+ * ui32BlockID           - ID of the counter block to query
+ * psConfiguredCounters  - out: receives the block's configured counters
+ *
+ * Returns PVRSRV_OK on success, PVRSRV_ERROR_INVALID_PARAMS when the ID is
+ * out of range, not present, or the block is disabled; power-lock errors are
+ * propagated from PVRSRVPowerLock().
+ */
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psConfiguredCounters != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Mask off the per-unit bits before range-checking the block ID. */
+ if ((ui32BlockID & ~RGX_CNTBLK_ID_UNIT_ALL_MASK) < RGX_CNTBLK_ID_LAST)
+ {
+ RGXFWIF_HWPERF_CTL_BLK *psBlock = NULL;
+ RGX_HWPERF_CONFIG_CNTBLK sBlockConfig;
+
+ /* Power lock held while reading the FW-shared control structure;
+ * released at the Error label below. */
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ for (i = 0; i < psHWPerfCtl->ui32NumBlocks; i++)
+ {
+ if (psHWPerfCtl->sBlkCfg[i].uiBlockID != ui32BlockID)
+ {
+ continue;
+ }
+ else if (psHWPerfCtl->sBlkCfg[i].uiEnabled == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block (0x%04x) is not enabled.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ psBlock = &psHWPerfCtl->sBlkCfg[i];
+ break;
+ }
+
+ if (psBlock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, Error);
+ }
+
+ /* NOTE(review): only ui16BlockID, ui16NumCounters and the first
+ * uiNumCounters entries of ui16Counters are written before the whole
+ * struct is copied out — any remaining fields of sBlockConfig are
+ * copied uninitialised. Confirm callers only read the fields set here. */
+ sBlockConfig.ui16BlockID = psBlock->uiBlockID;
+ sBlockConfig.ui16NumCounters = psBlock->uiNumCounters;
+
+ for (i = 0; i < psBlock->uiNumCounters; i++)
+ {
+ sBlockConfig.ui16Counters[i] = psBlock->aui32CounterCfg[i];
+ }
+
+ *psConfiguredCounters = sBlockConfig;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Block ID (0x%04x) was invalid.", ui32BlockID));
+ /* Power lock was never taken on this path, so skip the unlock. */
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, InvalidIDError);
+ }
+
+Error:
+ PVRSRVPowerUnlock(psDevNode);
+
+InvalidIDError:
+ return eError;
+}
+
+/* Count the enabled HWPerf counter blocks and, optionally, return their IDs.
+ *
+ * psDevNode            - device node; used to take/release the power lock
+ * psHWPerfCtl          - FW HWPerf control structure holding per-block configs
+ * ui32ArrayLen         - capacity of pui32EnabledBlockIDs (may be 0)
+ * pui32BlockCount      - out: total number of enabled blocks
+ * pui32EnabledBlockIDs - out (optional, may be NULL): enabled block IDs
+ *
+ * Returns PVRSRV_OK, PVRSRV_ERROR_OUT_OF_MEMORY when the supplied array is
+ * too small, or an error from PVRSRVPowerLock().
+ *
+ * NOTE(review): pui32BlockCount is dereferenced below without a NULL check,
+ * unlike psDevNode/psHWPerfCtl — confirm all callers pass a valid pointer.
+ */
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs)
+{
+ IMG_UINT32 ui32LastIDIdx = 0;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_RETURN_IF_FALSE(psDevNode != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_RETURN_IF_FALSE(psHWPerfCtl != NULL, PVRSRV_ERROR_INVALID_PARAMS);
+
+ *pui32BlockCount = 0;
+
+ /* A non-zero capacity with no output array is tolerated: the count is
+ * still returned, only the ID list is skipped. */
+ if (ui32ArrayLen > 0 && pui32EnabledBlockIDs == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "ui32ArrayLen is greater than 0 but pui32EnabledBlockIDs is NULL"));
+ }
+
+ PVR_RETURN_IF_ERROR(PVRSRVPowerLock(psDevNode));
+
+ for (i = 0; i < psHWPerfCtl->ui32NumBlocks; i++)
+ {
+ if (psHWPerfCtl->sBlkCfg[i].uiEnabled)
+ {
+ *pui32BlockCount += 1;
+
+ if (pui32EnabledBlockIDs == NULL)
+ {
+ continue;
+ }
+
+ /* Stop filling once the caller's array is exhausted; the count
+ * already reflects more blocks than were returned. */
+ if (ui32LastIDIdx + 1 > ui32ArrayLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ui32ArrayLen less than the number of enabled blocks."));
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, Error);
+ }
+
+ pui32EnabledBlockIDs[ui32LastIDIdx] = psHWPerfCtl->sBlkCfg[i].uiBlockID;
+ ui32LastIDIdx += 1;
+ }
+ }
+
+Error:
+ PVRSRVPowerUnlock(psDevNode);
+ return eError;
+}
+
/******************************************************************************
End of file (rgxhwperf.c)
******************************************************************************/
IMG_UINT32 ui32ArrayLen,
RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+PVRSRV_ERROR PVRSRVRGXGetConfiguredHWPerfCounters(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32BlockID,
+ RGX_HWPERF_CONFIG_CNTBLK *psConfiguredCounters);
+
+PVRSRV_ERROR PVRSRVRGXGetEnabledHWPerfBlocks(PVRSRV_DEVICE_NODE *psDevNode,
+ RGXFWIF_HWPERF_CTL *psHWPerfCtl,
+ IMG_UINT32 ui32ArrayLength,
+ IMG_UINT32 *pui32BlockCount,
+ IMG_UINT32 *pui32EnabledBlockIDs);
+
#endif /* RGXHWPERF_H_ */
static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode);
static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
-#if (RGX_NUM_OS_SUPPORTED > 1)
-static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid);
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID);
static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap);
-#endif
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
/* Services internal heap identification used in this file only */
#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */
RGXFWIF_GPU_UTIL_STATS *psReturnStats)
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
- volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ const volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+ IMG_UINT64 (*paaui64DMOSTmpCounters)[RGX_NUM_DRIVERS_SUPPORTED][RGXFWIF_GPU_UTIL_STATE_NUM];
+ IMG_UINT64 (*paui64DMOSTmpLastWord)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastState)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastPeriod)[RGX_NUM_DRIVERS_SUPPORTED];
+ IMG_UINT64 (*paui64DMOSTmpLastTime)[RGX_NUM_DRIVERS_SUPPORTED];
IMG_UINT64 ui64TimeNow;
IMG_UINT32 ui32Attempts;
IMG_UINT32 ui32Remainder;
+ IMG_UINT32 ui32DriverID;
+ IMG_UINT32 ui32MaxDMCount;
+ RGXFWIF_DM eDM;
/***** (1) Initialise return stats *****/
psReturnStats->ui64GpuStatBlocked = 0;
psReturnStats->ui64GpuStatCumulative = 0;
+ memset(psReturnStats->aaui64DMOSStatIdle, 0, sizeof(psReturnStats->aaui64DMOSStatIdle));
+ memset(psReturnStats->aaui64DMOSStatActive, 0, sizeof(psReturnStats->aaui64DMOSStatActive));
+ memset(psReturnStats->aaui64DMOSStatBlocked, 0, sizeof(psReturnStats->aaui64DMOSStatBlocked));
+ memset(psReturnStats->aaui64DMOSStatCumulative, 0, sizeof(psReturnStats->aaui64DMOSStatCumulative));
+
if (hGpuUtilUser == NULL)
{
return PVRSRV_ERROR_INVALID_PARAMS;
}
psAggregateStats = hGpuUtilUser;
+ ui32MaxDMCount = psDevInfo->sDevFeatureCfg.ui32MAXDMCount;
+
+ /* Allocate temporary counters used in the attempts loop */
+ paaui64DMOSTmpCounters = OSAllocMem(sizeof(*paaui64DMOSTmpCounters) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paaui64DMOSTmpCounters != NULL, "OSAllocMem:1", failTmpCountersAlloc);
+ paui64DMOSTmpLastWord = OSAllocMem(sizeof(*paui64DMOSTmpLastWord) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastWord != NULL, "OSAllocMem:2", failTmpLastWordAlloc);
+ paui64DMOSTmpLastState = OSAllocMem(sizeof(*paui64DMOSTmpLastState) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastState != NULL, "OSAllocMem:3", failTmpLastStateAlloc);
+ paui64DMOSTmpLastPeriod = OSAllocMem(sizeof(*paui64DMOSTmpLastPeriod) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastPeriod != NULL, "OSAllocMem:4", failTmpLastPeriodAlloc);
+ paui64DMOSTmpLastTime = OSAllocMem(sizeof(*paui64DMOSTmpLastTime) * ui32MaxDMCount);
+ PVR_LOG_GOTO_IF_FALSE(paui64DMOSTmpLastTime != NULL, "OSAllocMem:5", failTmpLastTimeAlloc);
/* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
{
- IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
- IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+ const volatile IMG_UINT64 *pui64GpuStatsCounters = &psUtilFWCb->aui64GpuStatsCounters[0];
+ const volatile IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS] = &psUtilFWCb->aaui64DMOSLastWord[0];
+ const volatile IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM] = &psUtilFWCb->aaaui64DMOSStatsCounters[0];
+
+ IMG_UINT64 aui64GpuTmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+ IMG_UINT64 ui64GpuLastPeriod = 0, ui64GpuLastWord = 0, ui64GpuLastState = 0, ui64GpuLastTime = 0;
IMG_UINT32 i = 0;
* First attempt at detecting if the FW is in the middle of an update.
* This should also help if the FW is in the middle of a 64 bit variable update.
*/
- while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
- (aui64TmpCounters[ui64LastState] !=
- psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+ while (((ui64GpuLastWord != psUtilFWCb->ui64GpuLastWord) ||
+ (aui64GpuTmpCounters[ui64GpuLastState] !=
+ pui64GpuStatsCounters[ui64GpuLastState])) &&
(i < MAX_ITERATIONS))
{
- ui64LastWord = psUtilFWCb->ui64LastWord;
- ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
- aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
- aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE];
- aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+ ui64GpuLastWord = psUtilFWCb->ui64GpuLastWord;
+ ui64GpuLastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64GpuLastWord);
+ aui64GpuTmpCounters[GPU_IDLE] = pui64GpuStatsCounters[GPU_IDLE];
+ aui64GpuTmpCounters[GPU_ACTIVE] = pui64GpuStatsCounters[GPU_ACTIVE];
+ aui64GpuTmpCounters[GPU_BLOCKED] = pui64GpuStatsCounters[GPU_BLOCKED];
+
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ paui64DMOSTmpLastWord[eDM][ui32DriverID] = paui64DMOSLastWord[eDM][ui32DriverID];
+ paui64DMOSTmpLastState[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_STATE(paui64DMOSTmpLastWord[eDM][ui32DriverID]);
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_IDLE];
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_ACTIVE];
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED] = paaui64DMOSStatsCounters[eDM][ui32DriverID][GPU_BLOCKED];
+ }
+ }
+
i++;
}
{
PVR_DPF((PVR_DBG_WARNING,
"RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+
+ OSFreeMem(paaui64DMOSTmpCounters);
+ OSFreeMem(paui64DMOSTmpLastWord);
+ OSFreeMem(paui64DMOSTmpLastState);
+ OSFreeMem(paui64DMOSTmpLastPeriod);
+ OSFreeMem(paui64DMOSTmpLastTime);
+
return PVRSRV_ERROR_TIMEOUT;
}
/* Update temp counters to account for the time since the last update to the shared ones */
OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */
ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode));
- ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
- ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
- aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+ ui64GpuLastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64GpuLastWord);
+ ui64GpuLastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64GpuLastTime);
+ aui64GpuTmpCounters[ui64GpuLastState] += ui64GpuLastPeriod;
/* Get statistics for a user since its last request */
- psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_IDLE],
psAggregateStats->ui64GpuStatIdle);
- psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE],
+ psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_ACTIVE],
psAggregateStats->ui64GpuStatActive);
- psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64GpuTmpCounters[GPU_BLOCKED],
psAggregateStats->ui64GpuStatBlocked);
psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle +
psReturnStats->ui64GpuStatActive +
psReturnStats->ui64GpuStatBlocked;
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ paui64DMOSTmpLastTime[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_TIME(paui64DMOSTmpLastWord[eDM][ui32DriverID]);
+ paui64DMOSTmpLastPeriod[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, paui64DMOSTmpLastTime[eDM][ui32DriverID]);
+ paaui64DMOSTmpCounters[eDM][ui32DriverID][paui64DMOSTmpLastState[eDM][ui32DriverID]] += paui64DMOSTmpLastPeriod[eDM][ui32DriverID];
+
+ /* Get statistics for a user since its last request */
+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_IDLE],
+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_ACTIVE],
+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_GET_PERIOD(paaui64DMOSTmpCounters[eDM][ui32DriverID][GPU_BLOCKED],
+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID]);
+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] +
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] +
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID];
+ }
+ }
+
if (psAggregateStats->ui64TimeStamp != 0)
{
IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
break;
}
-
/***** (4) Update aggregate stats for the current user *****/
psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle;
psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
psAggregateStats->ui64TimeStamp = ui64TimeNow;
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psAggregateStats->aaui64DMOSStatIdle[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID];
+ psAggregateStats->aaui64DMOSStatActive[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID];
+ psAggregateStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] += psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID];
+ }
+ }
/***** (5) Convert return stats to microseconds *****/
psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+ for (eDM = 0; eDM < ui32MaxDMCount; eDM++)
+ {
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatIdle[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatActive[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatBlocked[eDM][ui32DriverID], 1000, &ui32Remainder);
+ psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID] = OSDivide64(psReturnStats->aaui64DMOSStatCumulative[eDM][ui32DriverID], 1000, &ui32Remainder);
+ }
+ }
+
+ OSFreeMem(paui64DMOSTmpLastTime);
+failTmpLastTimeAlloc:
+ OSFreeMem(paui64DMOSTmpLastPeriod);
+failTmpLastPeriodAlloc:
+ OSFreeMem(paui64DMOSTmpLastState);
+failTmpLastStateAlloc:
+ OSFreeMem(paui64DMOSTmpLastWord);
+failTmpLastWordAlloc:
+ OSFreeMem(paaui64DMOSTmpCounters);
+
+failTmpCountersAlloc:
/* Check that the return stats make sense */
if (psReturnStats->ui64GpuStatCumulative == 0)
{
- /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
- * returned 0. This could happen if the GPU frequency value
+ /* We can enter here only if allocating the temporary stats
+ * buffers failed, or all the RGXFWIF_GPU_UTIL_GET_PERIOD
+ * returned 0. The latter could happen if the GPU frequency value
* is not well calibrated and the FW is updating the GPU state
* while the Host is reading it.
* When such an event happens frequently, timers or the aggregate
/* NoStats used since this may be called outside of the register/de-register
* process calls which track memory use. */
- psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+ psAggregateStats = OSAllocZMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
if (psAggregateStats == NULL)
{
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
- psAggregateStats->ui64GpuStatIdle = 0;
- psAggregateStats->ui64GpuStatActive = 0;
- psAggregateStats->ui64GpuStatBlocked = 0;
- psAggregateStats->ui64TimeStamp = 0;
-
- /* Not used */
- psAggregateStats->bValid = IMG_FALSE;
- psAggregateStats->ui64GpuStatCumulative = 0;
-
*phGpuUtilUser = psAggregateStats;
return PVRSRV_OK;
RGXTimeCorrRestartPeriodic(psDeviceNode);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Process Workload Estimation Specific commands from the FW */
- WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Process Workload Estimation Specific commands from the FW */
+ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+ }
#endif
if (psDevInfo->pvAPMISRData == NULL)
* that is not used for mapping.
* To program the Device's BIF with the correct PC address, use the base
* address of the carveout reserved for MMU mappings as Kernel MMU PC Address */
-#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
- sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
-#else
- PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
- eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG;
- PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)");
-
- sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr +
- (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED);
-#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */
+ IMG_DEV_PHYADDR sDevPAddr;
+ PHYS_HEAP *psFwPageTableHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
+
+ PVR_LOG_RETURN_IF_FALSE((NULL != psFwPageTableHeap),
+ "Firmware Page Table heap not defined.",
+ PVRSRV_ERROR_INVALID_HEAP);
+
+ PhysHeapGetDevPAddr(psFwPageTableHeap, &sDevPAddr);
+ sKernelMMUCtxPCAddr.uiAddr = sDevPAddr.uiAddr;
}
else
{
value NO_COHERENCY 0x0 {SoC does not support any form of Coherency}
value ACE_LITE_COHERENCY 0x1 {SoC supports ACE-Lite or I/O Coherency}
value FULL_ACE_COHERENCY 0x2 {SoC supports full ACE or 2-Way Coherency} */
- ui32DeviceFabricCoherency = OSReadHWReg32(pvRegsBaseKM, RGX_CR_SOC_AXI);
+ ui32DeviceFabricCoherency = OSReadHWReg32((void __iomem *)pvRegsBaseKM, RGX_CR_SOC_AXI);
PVR_LOG(("AXI fabric coherency (RGX_CR_SOC_AXI): 0x%x", ui32DeviceFabricCoherency));
#if defined(DEBUG)
if (ui32DeviceFabricCoherency & ~((IMG_UINT32)RGX_CR_SOC_AXI_MASKFULL))
IMG_UINT32 ui32FBCDCVersionOverride = 0;
#endif
- {
-
#if defined(SUPPORT_VALIDATION)
+ void *pvAppHintState = NULL;
+
+ IMG_UINT32 ui32AppHintDefault;
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE;
+ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride,
+ &ui32AppHintDefault, &ui32FBCDCVersionOverride);
+ OSFreeKMAppHintState(pvAppHintState);
+
+ if (ui32FBCDCVersionOverride > 0)
+ {
if (ui32FBCDCVersionOverride == 2)
{
- PVR_DPF((PVR_DBG_WARNING,
- "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!",
- __func__));
+ return IMG_TRUE;
}
+ }
+ else
#endif
-
-#if !defined(NO_HARDWARE)
+ {
if (psDeviceNode->psDevConfig->bHasFBCDCVersion31)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: System uses FBCDC3.1 but GPU doesn't support it!",
- __func__));
+ return IMG_TRUE;
}
-#endif
}
return IMG_FALSE;
}
+/*
+	RGXGetTFBCLossyGroup
+
+	Returns the TFBC (Tile-based Frame Buffer Compression) lossy control
+	group for the given device. On this (Volcanic) code path the value is
+	always 0, since Volcanic cores do not use TFBC; the device node
+	parameter exists only to match the callback signature.
+*/
+static IMG_UINT32 RGXGetTFBCLossyGroup(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	/* Volcanic cores don't use TFBC */
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	return 0;
+}
+
/*
RGXDevMMUAttributes
*/
static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_BOOL bKernelMemoryCtx)
+ IMG_BOOL bKernelFWMemoryCtx)
{
MMU_DEVICEATTRIBS *psMMUDevAttrs = NULL;
-	/* bKernelMemoryCtx is only used for rogue cores */
+	/* bKernelFWMemoryCtx is only used for rogue cores */
- PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+ PVR_UNREFERENCED_PARAMETER(bKernelFWMemoryCtx);
if (psDeviceNode->pfnCheckDeviceFeature)
{
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Initialise work estimation lock */
- eError = OSLockCreate(&psDevInfo->hWorkEstLock);
- PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Initialise work estimation lock */
+ eError = OSLockCreate(&psDevInfo->hWorkEstLock);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit);
+ }
#endif
/* Initialise lists of ZSBuffers */
IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
(eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
+ /* The AutoVz driver enables a virtualisation watchdog not compatible with APM */
if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE)))
{
- PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__));
+ PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in AutoVz mode", __func__));
bEnableAPM = IMG_FALSE;
}
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
- /* The AutoVz driver enable a virtualisation watchdog not compatible with APM */
PVR_ASSERT(bEnableAPM == IMG_FALSE);
#endif
eError = PVRSRVTQLoadShaders(psDeviceNode);
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit);
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ eError = OSAllocateSecBuf(psDeviceNode, RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES, "SharedSecMem", &psDevInfo->psGenHeapSecMem);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSAllocateSecBuf", ErrorExit);
+#endif
+
psDevInfo->bDevInit2Done = IMG_TRUE;
return PVRSRV_OK;
PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
PVRSRV_ERROR eError;
-#if defined(SUPPORT_AUTOVZ)
+#if defined(RGX_PREMAP_FW_HEAPS)
PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
- if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp))
+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (!psDeviceNode->bAutoVzFwIsUp))
{
+ PHYS_HEAP *psFwPageTableHeap =
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
+
+ PVR_LOG_GOTO_IF_INVALID_PARAM((psFwPageTableHeap != NULL),
+ eError, failed_to_create_ctx);
+
/* Temporarily swap the MMU and default GPU physheap to allow the page
* tables of all memory mapped by the FwKernel context to be placed
* in a dedicated memory carveout. This should allow the firmware mappings to
* persist after a Host kernel crash or driver reset. */
-
- psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+ psDeviceNode->psMMUPhysHeap = psFwPageTableHeap;
}
#endif
psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
/* Create the memory context for the firmware. */
- eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_FORFW,
&psDevInfo->psKernelDevmemCtx);
if (eError != PVRSRV_OK)
{
goto failed_to_find_heap;
}
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if (defined(RGX_PREMAP_FW_HEAPS)) || (defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1))
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSID;
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ IMG_UINT32 ui32DriverID;
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH];
- OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+ OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
- &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+ &psDevInfo->psPremappedFwRawHeap[ui32DriverID]);
PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap);
}
}
#endif
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
IMG_DEV_PHYADDR sPhysHeapBase;
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
- eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase);
+ eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM], &sPhysHeapBase);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap);
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
+ IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE)};
eError = RGXFwRawHeapAllocMap(psDeviceNode,
- ui32OSID,
+ ui32DriverID,
sRawFwHeapBase,
RGX_FIRMWARE_RAW_HEAP_SIZE);
if (eError != PVRSRV_OK)
{
- for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--)
+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--)
{
- RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID);
}
PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap);
}
}
-#if defined(SUPPORT_AUTOVZ)
+#if defined(RGX_PREMAP_FW_HEAPS)
/* restore default Px setup */
psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
#endif
}
-#else
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ /* On setups with dynamically mapped Guest heaps, the Guest makes
+ * a PVZ call to the Host to request the mapping during init. */
+
if (PVRSRV_VZ_MODE_IS(GUEST))
{
eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig);
PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap);
}
-#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+#endif /* !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
if (PVRSRV_VZ_MODE_IS(GUEST))
{
{
PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
+#if defined(RGX_PREMAP_FW_HEAPS)
+ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
+#endif
-#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
-#if defined(SUPPORT_AUTOVZ)
- PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap;
-
- psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap;
+#if defined(RGX_PREMAP_FW_HEAPS)
+ psDeviceNode->psMMUPhysHeap =
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT];
if (!psDeviceNode->bAutoVzFwIsUp)
#endif
{
- IMG_UINT32 ui32OSID;
+ IMG_UINT32 ui32DriverID;
- for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
+ RGXFwRawHeapUnmapFree(psDeviceNode, ui32DriverID);
}
}
-#if defined(SUPPORT_AUTOVZ)
- psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
-#endif
}
-#else
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
if (PVRSRV_VZ_MODE_IS(GUEST))
{
(void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig);
eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
PVR_ASSERT(eError == PVRSRV_OK);
}
+
+#if defined(RGX_PREMAP_FW_HEAPS)
+ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap;
+#endif
}
static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
{
ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
- /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/
- ui32BuildOptionsMismatch &= ~OPTIONS_DEBUG_MASK;
+ /*Mask non-critical options out as we do support combining them in UM & KM */
+ ui32BuildOptionsMismatch &= ~FW_OPTIONS_STRICT;
#endif
if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
{
ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport;
if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) ||
- (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED)))
+ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_DRIVERS_SUPPORTED)))
{
PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)",
- __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
+ __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? (1) : (RGX_NUM_DRIVERS_SUPPORTED), ui8FwOsCount));
}
#endif /* defined(NO_HARDWARE) */
* The FW code must be the first allocation in the firmware heap, otherwise
* the bootloader will not work (the FW will not be able to find the bootloader).
*/
- PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_MAIN_HEAP_BASE);
+ PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE);
/*
* Set up Allocation for FW data section
IMG_UINT64 *pui64UVBRMNumRegions,
IMG_UINT32 ui32HWPerfCountersDataSize,
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ IMG_BOOL bSPUClockGating,
FW_PERF_CONF eFirmwarePerf,
IMG_UINT32 ui32KCCBSizeLog2,
IMG_UINT32 ui32ConfigFlagsExt,
pui32USRMNumRegions,
pui64UVBRMNumRegions,
eRGXRDPowerIslandingConf,
+ bSPUClockGating,
eFirmwarePerf,
ui32KCCBSizeLog2,
ui32AvailablePowUnitsMask,
psDevInfo->bDevInit2Done = IMG_FALSE;
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+ if (psDevInfo->psGenHeapSecMem != NULL)
+ {
+ OSFreeSecBuf(psDevInfo->psGenHeapSecMem);
+ }
+#endif
+
if ((psDevInfo->hTQUSCSharedMem != NULL) &&
(psDevInfo->hTQCLISharedMem != NULL))
{
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* De-init work estimation lock */
- if (psDevInfo->hWorkEstLock != NULL)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- OSLockDestroy(psDevInfo->hWorkEstLock);
+ /* De-init work estimation lock */
+ if (psDevInfo->hWorkEstLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hWorkEstLock);
+ }
}
#endif
PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
DEVICE_MEMORY_INFO *psDevMemoryInfo;
- IMG_UINT32 ui32Temp=0;
if (!psDevInfo)
{
DeviceDepBridgeDeInit(psDevInfo);
-#if defined(PDUMP)
- DevmemIntFreeDefBackingPage(psDeviceNode,
- &psDeviceNode->sDummyPage,
- DUMMY_PAGE);
- DevmemIntFreeDefBackingPage(psDeviceNode,
- &psDeviceNode->sDevZeroPage,
- DEV_ZERO_PAGE);
-#endif
-
-#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
- if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
- {
- OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
- PVR_UNREFERENCED_PARAMETER(ui32Temp);
- }
- else
-#else
- {
- /*Delete the Dummy page related info */
- ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
- if (0 != ui32Temp)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Dummy page reference counter is non zero (%u)",
- __func__,
- ui32Temp));
- PVR_ASSERT(0);
- }
- }
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ OSLockDestroy(psDevInfo->hCounterDumpingLock);
#endif
- /*Delete the Dummy page related info */
- ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter);
- if (0 != ui32Temp)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Zero page reference counter is non zero (%u)",
- __func__,
- ui32Temp));
- }
-
-#if defined(PDUMP)
- if (NULL != psDeviceNode->sDummyPage.hPdumpPg)
- {
- PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active");
- }
-
- if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg)
- {
- PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active");
- }
-#endif
-
- /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */
- OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
-
- /* Destroy the zero page lock */
- OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
-
/* Unregister debug request notifiers first as they could depend on anything. */
RGXDebugDeinit(psDevInfo);
psDevInfo->pvRegsBaseKM = NULL;
}
+ if (psDevInfo->pvSecureRegsBaseKM != NULL)
+ {
+#if !defined(NO_HARDWARE)
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) &&
+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1))
+ {
+ /* undo the VA offset performed in RGXRegisterDevice() to allow the allocation to be unmapped */
+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM, RGX_HOST_SECURE_REGBANK_SIZE);
+ }
+#endif /* !NO_HARDWARE */
+ psDevInfo->pvSecureRegsBaseKM = NULL;
+ }
+
#if 0 /* not required at this time */
if (psDevInfo->hTimer)
{
IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength;
IMG_UINT32 ui32Log2ImportAlignment;
PFN_IS_PRESENT pfnIsHeapPresent;
+ PFN_HEAP_INIT pfnInit;
+ PFN_HEAP_DEINIT pfnDeInit;
IMG_UINT32 ui32HeapInstanceFlags;
};
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+/* Private data struct for general heap: records the devmem reservation and
+ * PMR mapping created for the secure-memory carveout so that
+ * GeneralHeapDeInit can undo them in reverse order. */
+typedef struct RGX_GENERAL_HEAP_DATA_TAG
+{
+	DEVMEMINT_RESERVATION *psSecMemReservation;
+	DEVMEMINT_MAPPING *psSecMemMapping;
+} RGX_GENERAL_HEAP_DATA;
+
+/* Init callback function for general heap.
+ *
+ * Reserves RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES at the KM-general reserved
+ * region offset from the heap base, then maps the per-device secure memory
+ * PMR (psDevInfo->psGenHeapSecMem) into that range with GPU read/write
+ * access. On success, returns PVRSRV_OK and stores the allocated tracking
+ * data in *phPrivData (later passed to GeneralHeapDeInit). On failure,
+ * partial work is rolled back via the goto-cleanup labels and an error
+ * code is returned; *phPrivData is left unwritten. */
+static PVRSRV_ERROR GeneralHeapInit(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_HANDLE *phPrivData)
+{
+	PVRSRV_RGXDEV_INFO *psDevInfo;
+	RGX_GENERAL_HEAP_DATA *psHeapData;
+	IMG_DEV_VIRTADDR sCarveOutAddr;
+	PVRSRV_ERROR eError;
+
+	/* Validate all parameters before any allocation takes place */
+	PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+	PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemHeap, "psDevmemHeap");
+	PVR_LOG_RETURN_IF_INVALID_PARAM(phPrivData, "phPrivData");
+
+	psDevInfo = psDeviceNode->pvDevice;
+
+	psHeapData = OSAllocMem(sizeof(*psHeapData));
+	PVR_LOG_RETURN_IF_NOMEM(psHeapData, "psHeapData");
+
+	/* Map the per device secure mem PMR allocation to the general devmem heap carveout. */
+	sCarveOutAddr = DevmemIntHeapGetBaseAddr(psDevmemHeap);
+	sCarveOutAddr.uiAddr += RGX_HEAP_KM_GENERAL_RESERVED_REGION_OFFSET;
+
+	eError = DevmemIntReserveRange(psDevmemHeap,
+ sCarveOutAddr,
+ RGXFWIF_KM_GENERAL_HEAP_TOTAL_BYTES,
+ &psHeapData->psSecMemReservation);
+	PVR_GOTO_IF_ERROR(eError, ErrorFreeHeapData);
+
+	eError = DevmemIntMapPMR(psDevmemHeap, psHeapData->psSecMemReservation, psDevInfo->psGenHeapSecMem,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ &psHeapData->psSecMemMapping);
+	PVR_GOTO_IF_ERROR(eError, ErrorUnreserve);
+
+	*phPrivData = (IMG_HANDLE)psHeapData;
+
+	return PVRSRV_OK;
+
+	/* Error cleanup: undo in strict reverse order of acquisition */
+ErrorUnreserve:
+	DevmemIntUnreserveRange(psHeapData->psSecMemReservation);
+ErrorFreeHeapData:
+	OSFreeMem(psHeapData);
+
+	return eError;
+}
+
+/* Deinit callback function for general heap.
+ * Unmaps the secure-memory PMR, releases the carveout reservation and
+ * frees the tracking data. hPrivData must be the handle produced by a
+ * successful GeneralHeapInit call. */
+static void GeneralHeapDeInit(IMG_HANDLE hPrivData)
+{
+	RGX_GENERAL_HEAP_DATA *psHeapData = (RGX_GENERAL_HEAP_DATA*)hPrivData;
+
+	PVR_ASSERT(hPrivData);
+
+	/* Reverse order of GeneralHeapInit: unmap first, then unreserve */
+	DevmemIntUnmapPMR(psHeapData->psSecMemMapping);
+	DevmemIntUnreserveRange(psHeapData->psSecMemReservation);
+
+	OSFreeMem(psHeapData);
+}
+#else
+/* Callbacks not used when secure KM allocations are not supported; the
+ * heap table stores NULL so no init/deinit callback is registered. */
+#define GeneralHeapInit NULL
+#define GeneralHeapDeInit NULL
+#endif
+
/* Feature Present function prototypes */
/* FW Feature Present function prototypes */
return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? IMG_TRUE : IMG_FALSE;
}
+/* pfnIsHeapPresent callback for the USC breakpoint-handler heap.
+ * The heap is instantiated only in builds with SUPPORT_USC_BREAKPOINT
+ * defined; the decision is compile-time, so the device and heap-info
+ * arguments are not consulted. */
+static IMG_BOOL BPHandlerHeapPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo)
+{
+	PVR_UNREFERENCED_PARAMETER(psDevInfo);
+	PVR_UNREFERENCED_PARAMETER(pksHeapInfo);
+
+#if defined(SUPPORT_USC_BREAKPOINT)
+	return IMG_TRUE;
+#else
+	return IMG_FALSE;
+#endif
+}
+
static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] =
{
-	/* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent HeapInstanceFlags */
-	{RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG},
-	{RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_COMPONENT_CTRL_HEAP_IDENT, RGX_COMPONENT_CTRL_HEAP_BASE, RGX_COMPONENT_CTRL_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE},
-	{RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE}
+	/* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent pfnInit pfnDeInit HeapInstanceFlags */
+	/* NOTE(review): a NULL pfnIsHeapPresent entry appears to mean the heap
+	 * is unconditionally instantiated, and NULL pfnInit/pfnDeInit means no
+	 * per-heap callback is run - confirm against _CountRequiredHeaps and
+	 * _InstantiateRequiredHeaps, which are not fully visible here. */
+	{RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, RGX_HEAP_GENERAL_RESERVED_TOTAL_SIZE, 0, NULL, GeneralHeapInit, GeneralHeapDeInit, HEAP_INST_DEFAULT_VALUE},
+	{RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG},
+	{RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, RGX_HEAP_PDS_RESERVED_TOTAL_SIZE, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, RGX_HEAP_USC_RESERVED_TOTAL_SIZE, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_USCCODE_BPH_HEAP_IDENT, RGX_USCCODE_BPH_HEAP_BASE, RGX_USCCODE_BPH_HEAP_SIZE, 0, 0, BPHandlerHeapPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_COMPONENT_CTRL_HEAP_IDENT, RGX_COMPONENT_CTRL_HEAP_BASE, RGX_COMPONENT_CTRL_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_PDS_INDIRECT_STATE_HEAP_IDENT, RGX_PDS_INDIRECT_STATE_HEAP_BASE, RGX_PDS_INDIRECT_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, NULL, NULL, NULL, HEAP_INST_DEFAULT_VALUE}
};
static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] =
{
-	/* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent HeapInstanceFlags*/
-	{RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, HEAP_INST_DEFAULT_VALUE},
-	{RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, HEAP_INST_DEFAULT_VALUE}
+	/* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent pfnInit pfnDeInit HeapInstanceFlags*/
+	/* Both firmware heaps gate their presence on the FWVZ*Present
+	 * callbacks; no per-heap init/deinit callbacks are registered. */
+	{RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWVZMainHeapPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE},
+	{RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, NULL, NULL, HEAP_INST_DEFAULT_VALUE}
};
/* Generic counting method. */
if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG)
{
- ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize;
+ ui32Log2DataPageSize = psDevInfo->psDeviceNode->ui32RGXLog2Non4KPgSize;
}
else
{
psHeapInfo->uiHeapReservedRegionLength,
ui32Log2DataPageSize,
psHeapInfo->ui32Log2ImportAlignment,
+ psHeapInfo->pfnInit,
+ psHeapInfo->pfnDeInit,
*psDeviceMemoryHeapCursor);
(*psDeviceMemoryHeapCursor)++;
PVRSRV_ERROR eError;
DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
- IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
+ IMG_UINT32 ui32AppHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp);
IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW);
IMG_UINT32 ui32CountedHeapSize;
- IMG_UINT32 ui32HeapCount = 0;
- IMG_UINT32 ui32FWHeapCount = 0;
+ IMG_UINT32 ui32AppHeapCount = 0U;
+ IMG_UINT32 ui32FWHeapCount = 0U;
/* Count heaps required for the app heaps */
_CountRequiredHeaps(psDevInfo,
gasRGXHeapLayoutApp,
- ui32HeapListSize,
- &ui32HeapCount);
+ ui32AppHeapListSize,
+ &ui32AppHeapCount);
/* Count heaps required for the FW heaps */
_CountRequiredHeaps(psDevInfo,
ui32FWHeapListSize,
&ui32FWHeapCount);
- ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED);
+ ui32CountedHeapSize = (ui32AppHeapCount + ui32FWHeapCount + RGX_NUM_DRIVERS_SUPPORTED);
psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize);
PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0);
/* Instantiate App Heaps */
_InstantiateRequiredHeaps(psDevInfo,
gasRGXHeapLayoutApp,
- ui32HeapListSize,
+ ui32AppHeapListSize,
&psDeviceMemoryHeapCursor);
/* Instantiate FW Heaps */
/* Check we have allocated the correct # of heaps, minus any VZ heaps as these
* have not been created at this point
*/
- PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED));
+ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_DRIVERS_SUPPORTED));
/*
In the new heap setup, we initialise 2 configurations:
if (RGX_GET_FEATURE_VALUE(psDevInfo, MMU_VERSION) >= 4)
{
IMG_UINT32 i;
- const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->ui32Log2Non4KPgSize);
+ const IMG_UINT32 ui32GeneralNon4KHeapPageSize = (1 << psDevInfo->psDeviceNode->ui32RGXLog2Non4KPgSize);
const IMG_UINT32 ui32RgxDefaultPageSize = (1 << RGXHeapDerivePageSize(OSGetPageShift()));
/*
}
}
-#if (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
/* Create additional raw firmware heaps */
- for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
- if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK)
+ if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32DriverID) != PVRSRV_OK)
{
/* if any allocation fails, free previously allocated heaps and abandon initialisation */
- for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--)
+ for (; ui32DriverID > RGX_FIRST_RAW_HEAP_DRIVER_ID; ui32DriverID--)
{
RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
psDeviceMemoryHeapCursor--;
psDeviceMemoryHeapCursor++;
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
return PVRSRV_OK;
e1:
static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
{
-#if (RGX_NUM_OS_SUPPORTED > 1)
- if (PVRSRV_VZ_MODE_IS(HOST))
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
/* Delete all guest firmware heaps */
- for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ FOREACH_DRIVER_RAW_HEAP(ui32DriverID)
{
RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor);
psDeviceMemoryHeapCursor++;
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
}
-static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+static PVRSRV_ERROR RGXInitSharedFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
{
PVRSRV_ERROR eError = PVRSRV_OK;
- PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig,
- PHYS_HEAP_USAGE_FW_MAIN);
+ PHYS_HEAP_CONFIG *psSysHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
-#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_NUM_DRIVERS_SUPPORTED) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
/* VZ heap validation */
if (PVRSRV_VZ_MODE_IS(GUEST))
{
- PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL,
+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg != NULL,
"FW Main heap is required for VZ Guest.",
PVRSRV_ERROR_PHYSHEAP_CONFIG);
}
#endif
- if (psFwMainConfig != NULL)
+ if (psSysHeapCfg != NULL)
{
- /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided
+ /* Check FW_SHARED for multiple usage flags. Because FW_SHARED is divided
into subheaps, shared usage with other heaps is not allowed. */
- PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN,
- "FW Main phys heap config specified with more than one usage. FW Main must be FW Main only.",
+ PVR_LOG_RETURN_IF_FALSE(psSysHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_SHARED,
+ "FW_SHARED phys heap config specified with more than one usage. "
+ "FW_SHARED heap must be exclusively used as FW_SHARED.",
PVRSRV_ERROR_PHYSHEAP_CONFIG);
}
- if (psFwMainConfig == NULL)
+ if (psSysHeapCfg == NULL)
{
PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__));
+ /* Nothing to do. Default to the physheap fallback option */
}
- else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA)
+ else if (psSysHeapCfg->eType == PHYS_HEAP_TYPE_UMA)
{
PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__));
+
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psSysHeapCfg,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+
+ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG] = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
}
else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */
{
- PHYS_HEAP_CONFIG sFwHeapConfig;
+ PHYS_HEAP_CONFIG sFwMainHeapCfg, sFwCfgHeapCfg;
PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__));
- PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE,
- "Invalid firmware physical heap size.", ErrorDeinit);
-
-#if defined(SUPPORT_AUTOVZ)
- if (PVRSRV_VZ_MODE_IS(HOST))
- {
- /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers:
- * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb;
- * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */
- IMG_UINT64 uMaxFwMmuPageTableSize = 1 * 1024 * 1024;
-
- sFwHeapConfig = *psFwMainConfig;
-
- /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap.
- * If a different base address is specified for this reserved range, use the overriding define instead. */
-#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR)
- sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
- sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR;
-#else
- sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
- sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED;
-#endif
-
- sFwHeapConfig.uiSize = uMaxFwMmuPageTableSize;
- sFwHeapConfig.ui32UsageFlags = 0;
-
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw MMU subheap",
- &psDeviceNode->psFwMMUReservedPhysHeap);
- PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit);
- }
-#endif
+ /* Subheap layout: Main + Config */
+ sFwMainHeapCfg = *psSysHeapCfg;
+ sFwMainHeapCfg.uiSize -= RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- /* Subheap layout: Main + (optional MIPS reserved range) + Config */
- sFwHeapConfig = *psFwMainConfig;
- sFwHeapConfig.uiSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE;
- sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN;
-
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap);
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwMainHeapCfg.ui32UsageFlags),
+ &sFwMainHeapCfg,
+ "Fw Main subheap",
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit);
- sFwHeapConfig = *psFwMainConfig;
- sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG;
+ sFwCfgHeapCfg = *psSysHeapCfg;
+ sFwCfgHeapCfg.sStartAddr.uiAddr += sFwMainHeapCfg.uiSize;
+ sFwCfgHeapCfg.sCardBase.uiAddr += sFwMainHeapCfg.uiSize;
+
+ sFwCfgHeapCfg.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
- eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap);
+ eError = PhysmemCreateHeapLMA(psDeviceNode,
+ RGXPhysHeapGetLMAPolicy(sFwCfgHeapCfg.ui32UsageFlags),
+ &sFwCfgHeapCfg,
+ "Fw Cfg subheap",
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit);
}
/* Acquire FW heaps */
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit);
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit);
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
+ return eError;
+
+ErrorDeinit:
+ PVR_ASSERT(IMG_FALSE);
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXInitPrivateFwPhysHeaps(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PHYS_HEAP_CONFIG *psFwCodeHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_CODE);
+ PHYS_HEAP_CONFIG *psFwDataHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PRIV_DATA);
+ PHYS_HEAP_CONFIG *psFwPrivateHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PRIVATE);
+ PHYS_HEAP_CONFIG sFwPrivateTempCfg;
+
+ if (psFwPrivateHeapCfg != NULL)
+ {
+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg == NULL) && (psFwDataHeapCfg == NULL),
+ "FW_PRIVATE and the FW_CODE & FW_PRIV_DATA usage flags "
+ "achieve the same goal and are mutually exclusive.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ /* Fw code and data are both allocated from this unified heap */
+ sFwPrivateTempCfg = *psFwPrivateHeapCfg;
+ sFwPrivateTempCfg.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+
+ psFwCodeHeapCfg = &sFwPrivateTempCfg;
+ psFwDataHeapCfg = &sFwPrivateTempCfg;
+ }
+
+ if ((psFwCodeHeapCfg == NULL) || (psFwDataHeapCfg == NULL))
+ {
+ if (psFwCodeHeapCfg != psFwDataHeapCfg)
+ {
+ /* Private Firmware code and data heaps must be either both defined
+ * or both undefined. There is no point in isolating one but not
+ * the other.*/
+ eError = PVRSRV_ERROR_PHYSHEAP_CONFIG;
+ PVR_LOG_GOTO_IF_ERROR(eError, "PrivateFwPhysHeap check", ErrorDeinit);
+ }
+ else
+ {
+ /* No dedicated heaps, default to the physheap fallback option */
+ }
+ }
+ else if (psFwCodeHeapCfg == psFwDataHeapCfg)
+ {
+ if (psFwCodeHeapCfg->ui32UsageFlags ==
+ (PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA))
+ {
+ /* Fw code and private data allocations come from the same system heap
+ * Instantiate one physheap and share it between them. */
+
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwCodeHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig");
+ }
+ else
+ {
+ /* Not an exclusive heap, can be used for other purposes (e.g. secure buffers).
+ * Expect the PVR layer to have already created a heap for the other uses. */
+ }
+ }
+ else
+ {
+ /*
+ * Separating private Firmware code and data is allowed for backwards compatibility
+ * purposes. New platforms should use the unified FW_PRIVATE heap instead.
+ *
+ * Early security implementations on Rogue cores required separate FW_PRIV_DATA
+ * and FW_CODE heaps, as access permissions to Firmware were granted differently
+ * based on the transaction types (code or data).
+ */
+ PVR_LOG_RETURN_IF_FALSE((psFwCodeHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_CODE) &&
+ (psFwDataHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PRIV_DATA),
+ "Dedicated private heaps for Fw code and "
+ "data must have one usage flag exclusively.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ /* Dedicated Fw code heap */
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwCodeHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+
+ /* Dedicated Fw private data heap */
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwDataHeapCfg,
+ NULL);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap");
+ }
+
+#if defined(RGX_PREMAP_FW_HEAPS) && defined(SUPPORT_TRUSTED_DEVICE)
+ /* When premapping distinct private and shared Firmware phys heaps
+ * inside the same virtual devmem heap, their sizes must add up to
+ * the fixed RGX_FIRMWARE_RAW_HEAP_SIZE for the premapping to work */
+ {
+ PHYS_HEAP_CONFIG *psFwSharedHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_SHARED);
+ IMG_UINT64 ui64FwPrivateHeapSize;
+
+ PVR_LOG_GOTO_IF_FALSE((psFwCodeHeapCfg != NULL) && (psFwDataHeapCfg != NULL),
+ "Security support requires Fw code and data memory be"
+ " separate from the heap shared with the kernel driver.", ErrorDeinit);
+
+ if (psFwCodeHeapCfg != psFwDataHeapCfg)
+ {
+ /* Private Firmware allocations come from 2 different heaps */
+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize + psFwDataHeapCfg->uiSize;
+ }
+ else
+ {
+ /* Private Firmware allocations come from a single heap */
+ ui64FwPrivateHeapSize = psFwCodeHeapCfg->uiSize;
+ }
+
+ PVR_LOG_GOTO_IF_FALSE((psFwSharedHeapCfg->uiSize +
+ ui64FwPrivateHeapSize) ==
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ "Invalid firmware physical heap size.", ErrorDeinit);
+ }
+#endif
+
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit);
- eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
- &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
+ eError = PhysHeapAcquireByID(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]);
PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit);
return eError;
ErrorDeinit:
PVR_ASSERT(IMG_FALSE);
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
return eError;
}
-static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize)
+static PVRSRV_ERROR RGXInitFwPageTableHeap(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(RGX_PREMAP_FW_HEAPS)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ PHYS_HEAP_CONFIG *psFwPageTableHeapCfg = PVRSRVFindPhysHeapConfig(psDeviceNode->psDevConfig,
+ PHYS_HEAP_USAGE_FW_PREMAP_PT);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg != NULL),
"The Firmware Page Table phys heap config was not found.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->ui32UsageFlags == PHYS_HEAP_USAGE_FW_PREMAP_PT),
+ "The Firmware Page Table heap must be used exclusively for this purpose",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_LMA) ||
+ (psFwPageTableHeapCfg->eType == PHYS_HEAP_TYPE_DMA),
+ "The Firmware Page Table heap must be LMA or DMA memory.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ PVR_LOG_RETURN_IF_FALSE((psFwPageTableHeapCfg->uiSize >= RGX_FIRMWARE_MAX_PAGETABLE_SIZE),
+ "The Firmware Page Table heap must be able to hold the maximum "
+ "number of pagetables needed to cover the Firmware's VA space.",
+ PVRSRV_ERROR_PHYSHEAP_CONFIG);
+
+ eError = PhysHeapCreateHeapFromConfig(psDeviceNode,
+ psFwPageTableHeapCfg,
+ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig:FwPageTableHeap");
+
+ eError = PhysHeapAcquire(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP_PT]);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire:FwPageTableHeap");
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+#endif /* defined(RGX_PREMAP_FW_HEAPS) */
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = RGXInitFwPageTableHeap(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitFwPageTableHeap", ErrorDeinit);
+ eError = RGXInitSharedFwPhysHeaps(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSharedFwPhysHeaps", ErrorDeinit);
+ eError = RGXInitPrivateFwPhysHeaps(psDeviceNode);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitPrivateFwPhysHeaps", ErrorDeinit);
+
+ErrorDeinit:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDeviceFWMainHeapMemCheck
+@Description Checks the free memory in FW Main PhysHeap of a device to ensure
+ there is enough for a connection to be made.
+
+@Input psDeviceNode The device of the FW Main PhysHeap to be checked.
+
+@Return On success PVRSRV_OK, else a PVRSRV_ERROR code.
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXDeviceFWMainHeapMemCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PHYS_HEAP *psFWMainPhysHeap;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+
+ psFWMainPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+ if (psFWMainPhysHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to get device's FW Main PhysHeap"));
+ return PVRSRV_ERROR_INVALID_HEAP;
+ }
+
+ if (PhysHeapGetType(psFWMainPhysHeap) == PHYS_HEAP_TYPE_LMA)
+ {
+ const IMG_UINT32 ui32MinMemInKBs = RGX_FW_PHYSHEAP_MINMEM_ON_CONNECTION;
+ IMG_UINT64 ui64FreePhysHeapMem;
+
+ eError = PhysHeapFreeMemCheck(psFWMainPhysHeap,
+ KB2B(ui32MinMemInKBs),
+ &ui64FreePhysHeapMem);
+
+ if (eError == PVRSRV_ERROR_INSUFFICIENT_PHYS_HEAP_MEMORY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FW_MAIN PhysHeap contains less than the "
+ "minimum free space required to acquire a connection. "
+ "Free space: %"IMG_UINT64_FMTSPEC"KB "
+ "Minimum required: %uKB",
+ B2KB(ui64FreePhysHeapMem),
+ ui32MinMemInKBs));
+ }
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize)
{
void *pvAppHintState = NULL;
IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE;
IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+ IMG_UINT32 uiLog2OSPageSize = OSGetPageShift();
/* Get the page size for the dummy page from the NON4K heap apphint */
OSCreateKMAppHintState(&pvAppHintState);
GeneralNon4KHeapPageSize,&ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
*pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
OSFreeKMAppHintState(pvAppHintState);
+
+ /* Check the Non4k page size is at least the size of the OS page size
+ * or larger. The Non4k page size also has to be a multiple of the OS page
+ * size but since we have the log2 value from the apphint we know powers of 2
+ * will always be multiples.
+ */
+ PVR_LOG_RETURN_IF_FALSE(*pui32Log2Non4KPgSize >= uiLog2OSPageSize,
+ "Non4K page size smaller than OS page size",
+ PVRSRV_ERROR_INVALID_NON4K_HEAP_PAGESIZE);
+
+ return PVRSRV_OK;
}
/* RGXRegisterDevice
*
* WARNING!
*
- * No PDUMP statements are allowed in until Part 2 of the device initialisation
- * is reached.
+ * No PDUMP statements are allowed until device initialisation starts.
*/
PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
{
OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
- psDeviceNode->pfnDevSLCFlushRange = RGXSLCFlushRange;
psDeviceNode->pfnInvalFBSCTable = RGXInvalidateFBSCTable;
psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL;
/* Register callback for soft resetting some device modules */
psDeviceNode->pfnSoftReset = RGXSoftReset;
+#if defined(SUPPORT_VALIDATION)
+ psDeviceNode->pfnValidationGPUUnitsPowerChange = PVRSRVDeviceGPUUnitsPowerChange;
+#endif
+
/* Register callback for resetting the HWR logs */
psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
/* Register callback for initialising device-specific physical memory heaps */
psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit;
- /* Set up required support for dummy page */
- OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
- OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0);
-
- /* Set the order to 0 */
- psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0;
- psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0;
-
- /* Set the size of the Dummy page to zero */
- psDeviceNode->sDummyPage.ui32Log2PgSize = 0;
-
- /* Set the size of the Zero page to zero */
- psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0;
-
- /* Set the Dummy page phys addr */
- psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
-
- /* Set the Zero page phys addr */
- psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
-
- /* The lock can be acquired from MISR (Z-buffer) path */
- eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock);
- if (PVRSRV_OK != eError)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
- return eError;
- }
-
- /* Create the lock for zero page */
- eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock);
- if (PVRSRV_OK != eError)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__));
- goto free_dummy_page;
- }
-#if defined(PDUMP)
- psDeviceNode->sDummyPage.hPdumpPg = NULL;
- psDeviceNode->sDevZeroPage.hPdumpPg = NULL;
-#endif
+ /* Register callback for checking a device's FW Main physical heap for sufficient free memory */
+ psDeviceNode->pfnCheckForSufficientFWPhysMem = RGXDeviceFWMainHeapMemCheck;
psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31;
+ psDeviceNode->pfnGetTFBCLossyGroup = RGXGetTFBCLossyGroup;
+
/* The device shared-virtual-memory heap address-space size is stored here for faster
look-up without having to walk the device heap configuration structures during
client device connection (i.e. this size is relative to a zero-based offset) */
eError = PVRSRV_ERROR_BAD_MAPPING;
goto e13;
}
+ psDevInfo->pvSecureRegsBaseKM = NULL;
#else
psDevInfo->pvRegsBaseKM = NULL;
+ psDevInfo->pvSecureRegsBaseKM = NULL;
#endif /* !NO_HARDWARE */
psDeviceNode->pvDevice = psDevInfo;
PVR_DPF((PVR_DBG_ERROR,
"%s: Unsupported HW device detected by driver",
__func__));
- goto e14;
+ goto e15;
}
- _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize);
+ /*
+ * We must now setup the SECURITY mappings if supported. We cannot
+ * check on the features until we have reached here as the BVNC is
+ * not setup before now.
+ */
+#if !defined(NO_HARDWARE)
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) &&
+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1))
+ {
+ IMG_CPU_PHYADDR sHostSecureRegBankBase = {psDeviceNode->psDevConfig->sRegsCpuPBase.uiAddr + RGX_HOST_SECURE_REGBANK_OFFSET};
+
+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *) OSMapPhysToLin(sHostSecureRegBankBase,
+ RGX_HOST_SECURE_REGBANK_SIZE,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
- /*Set the zero & dummy page sizes as needed for the heap with largest page size */
- psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
- psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize;
+ if (psDevInfo->pvSecureRegsBaseKM == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXInitDevPart2KM: Failed to create RGX secure register mapping"));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto e13;
+ }
+
+ /*
+ * The secure register bank is mapped into the CPU VA space starting from
+ * the base of the normal register bank + an offset of RGX_HOST_SECURE_REGBANK_OFFSET.
+ * The hardware register addresses are all indexed from the base of the regular register bank.
+ * For the RegBankBase+RegOffset computation to still be accurate for host-secure registers,
+ * we need to compensate for offsets of registers in the secure bank
+ */
+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM - RGX_HOST_SECURE_REGBANK_OFFSET);
+ }
+ else
+ {
+ psDevInfo->pvSecureRegsBaseKM = psDevInfo->pvRegsBaseKM;
+ }
+#else
+ psDevInfo->pvSecureRegsBaseKM = NULL;
+#endif /* !NO_HARDWARE */
+
+ eError = _ReadNon4KHeapPageSize(&psDeviceNode->ui32RGXLog2Non4KPgSize);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_ReadNon4KHeapPageSize", e15);
/* Configure MMU specific stuff */
RGXMMUInit_Register(psDeviceNode);
eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo);
if (eError != PVRSRV_OK)
{
- goto e14;
+ goto e15;
}
eError = RGXHWPerfInit(psDevInfo);
- PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14);
+ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e15);
eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB);
PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw);
#if defined(SUPPORT_SOC_TIMER)
{
- IMG_BOOL ui32AppHintDefault = IMG_FALSE;
+ const IMG_BOOL bDefaultFalse = IMG_FALSE;
IMG_BOOL bInitSocTimer;
void *pvAppHintState = NULL;
OSCreateKMAppHintState(&pvAppHintState);
- OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &ui32AppHintDefault, &bInitSocTimer);
+ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &bDefaultFalse, &bInitSocTimer);
OSFreeKMAppHintState(pvAppHintState);
if (bInitSocTimer)
eError = DeviceDepBridgeInit(psDevInfo);
PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit");
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ eError = OSLockCreate(&psDevInfo->hCounterDumpingLock);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__));
+ goto ErrorDeInitDeviceDepBridge;
+ }
+#endif
+
/* Initialise error counters */
memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS));
return PVRSRV_OK;
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ErrorDeInitDeviceDepBridge:
+ DeviceDepBridgeDeInit(psDevInfo);
+#endif
+
e16:
#if defined(SUPPORT_VALIDATION)
RGXPowerDomainDeInitState(&psDevInfo->sPowerDomainState);
RGXHWPerfHostDeInit(psDevInfo);
ErrorDeInitHWPerfFw:
RGXHWPerfDeinit(psDevInfo);
-e14:
+e15:
#if !defined(NO_HARDWARE)
- OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
- psDevInfo->ui32RegSize);
-
+ if (psDevInfo->pvSecureRegsBaseKM != NULL)
+ {
+ /* Adjust pvSecureRegsBaseKM if device has SECURITY_VERSION > 1 */
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, HOST_SECURITY_VERSION) &&
+ (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1))
+ {
+ /* Undo the VA offset adjustment to unmap correct VAddr */
+ psDevInfo->pvSecureRegsBaseKM = (void __iomem *)((uintptr_t)psDevInfo->pvSecureRegsBaseKM + RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSUnMapPhysToLin((void __force *) psDevInfo->pvSecureRegsBaseKM,
+ psDevInfo->ui32RegSize);
+ }
+ }
e13:
+ if (psDevInfo->pvRegsBaseKM != NULL)
+ {
+ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize);
+ }
#endif /* !NO_HARDWARE */
OSLockDestroy(psDevInfo->hCCBRecoveryLock);
e12:
e0:
OSFreeMem(psDevInfo);
- /* Destroy the zero page lock created above */
- OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock);
-
-free_dummy_page:
- /* Destroy the dummy page lock created above */
- OSLockDestroy(psDeviceNode->sDummyPage.psPgLock);
-
PVR_ASSERT(eError != PVRSRV_OK);
return eError;
}
return PVRSRV_OK;
}
-#if (RGX_NUM_OS_SUPPORTED > 1)
+#if defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1)
/*!
*******************************************************************************
@Description Called to perform additional initialisation
******************************************************************************/
-static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid)
+static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32DriverID)
{
IMG_UINT32 uiStringLength;
IMG_UINT32 uiStringLengthMax = 32;
uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
- /* Start by allocating memory for this OSID heap identification string */
+ /* Start by allocating memory for this DriverID heap identification string */
psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
if (psDevMemHeap->pszName == NULL)
{
return PVRSRV_ERROR_OUT_OF_MEMORY;
}
- /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
- OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid);
+ /* Append the DriverID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+ OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32DriverID);
/* Use the common blueprint template support function to initialise the heap */
HeapCfgBlueprintInit(psDevMemHeap->pszName,
- RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE),
+ RGX_FIRMWARE_RAW_HEAP_BASE + (ui32DriverID * RGX_FIRMWARE_RAW_HEAP_SIZE),
RGX_FIRMWARE_RAW_HEAP_SIZE,
0,
ui32Log2RgxDefaultPageShift,
0,
+ NULL,
+ NULL,
psDevMemHeap);
return PVRSRV_OK;
static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap)
{
IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
- IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_DRIVERS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
/* Safe to do as the guest firmware heaps are last in the list */
if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase &&
OSFreeMem(pszName);
}
}
-#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
+#endif /* defined(RGX_PREMAP_FW_HEAPS) || (RGX_NUM_DRIVERS_SUPPORTED > 1) */
/******************************************************************************
End of file (rgxinit.c)
IMG_UINT64 *pui64UVBRMNumRegions,
IMG_UINT32 ui32HWPerfCountersDataSize,
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ IMG_BOOL bSPUClockGating,
FW_PERF_CONF eFirmwarePerf,
IMG_UINT32 ui32KCCBSizeLog2,
IMG_UINT32 ui32ConfigFlagsExt,
#include "cache_km.h"
#if defined(PDUMP)
-#include <stdarg.h>
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
#endif
void RGXMemCopy(const void *hPrivate,
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
#endif
{
- OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+ OSWriteUncheckedHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
}
PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
#endif
{
- OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+ OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
}
PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
else
#endif
{
- ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+ ui32RegValue = OSReadUncheckedHWReg32(pvRegsBase, ui32RegAddr);
}
PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
else
#endif
{
- ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+ ui64RegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr);
}
PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
/* only use the new values for bits we update according to the keep mask */
uiRegValueNew &= ~uiRegKeepMask;
#endif
{
- IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+ IMG_UINT64 uiRegValue = OSReadUncheckedHWReg64(pvRegsBase, ui32RegAddr);
uiRegValue &= uiRegKeepMask;
- OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
+ OSWriteUncheckedHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
}
return PVRSRV_OK;
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
PVR_ASSERT(hPrivate != NULL);
psParams = (RGX_LAYER_PARAMS*)hPrivate;
psDevInfo = psParams->psDevInfo;
- pvRegsBase = psDevInfo->pvRegsBaseKM;
+ pvRegsBase = (ui32RegAddr < RGX_HOST_SECURE_REGBANK_OFFSET) ?
+ (psDevInfo->pvRegsBaseKM) : (psDevInfo->pvSecureRegsBaseKM);
#if defined(PDUMP)
if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
/* Write the cat-base address */
- OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+ OSWriteUncheckedHWReg32(psDevInfo->pvSecureRegsBaseKM, ui32PCReg, ui32PCVal);
/* Pdump catbase address */
MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
{
+ IMG_BOOL bPowerWasDown;
IMG_UINT32 ui32MulticoreInfo;
IMG_UINT32 ui32PrimaryCoreIds;
IMG_UINT32 ui32PrimaryId;
IMG_UINT32 ui32NumCores;
IMG_UINT32 id, i;
+ bPowerWasDown = (psDeviceNode->psDevConfig->pfnGpuDomainPower(psDeviceNode) == PVRSRV_SYS_POWER_STATE_OFF);
+
+ /* Power-up the device as required to read the registers */
+ if (bPowerWasDown)
+ {
+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON");
+ }
+
ui32NumCores = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_DOMAIN)
& ~RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_CLRMSK)
>> RGX_CR_MULTICORE_DOMAIN_GPU_COUNT_SHIFT;
ui32PrimaryCoreIds >>= 3;
}
+ /* revert power state to what it was on entry to this function */
+ if (bPowerWasDown)
+ {
+ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF);
+ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF");
+ }
+
/* Register callback to return info about multicore setup to client bridge */
psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo;
}
#include "pdumpdesc.h"
#if defined(SUPPORT_VALIDATION)
#include "validation_soc.h"
-#include "rgxtbdefs.h"
+#include "rgxtbdefs_km.h"
#endif
/*
}
/*
- * Add a PDUMP POLL on the KZ signature check status.
+ * Add a PDUMP POLL on the WGP signature check status.
*/
- if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_NOERR_EN)
+ if (psDevInfo->ui32ValidationFlags & RGX_VAL_WGP_SIG_CHECK_NOERR_EN)
{
- PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: match required");
+ PDUMPCOMMENT(psDeviceNode, "Verify WGP Signature: match required");
eError = PDUMPREGPOL(psDeviceNode,
RGX_PDUMPREG_NAME,
RGX_CR_SCRATCH11,
ui32PDumpFlags,
PDUMP_POLL_OPERATOR_EQUAL);
}
- else if (psDevInfo->ui32ValidationFlags & RGX_VAL_KZ_SIG_CHECK_ERR_EN)
+ else if (psDevInfo->ui32ValidationFlags & RGX_VAL_WGP_SIG_CHECK_ERR_EN)
{
- PDUMPCOMMENT(psDeviceNode, "Verify KZ Signature: mismatch required");
+ PDUMPCOMMENT(psDeviceNode, "Verify WGP Signature: mismatch required");
eError = PDUMPREGPOL(psDeviceNode,
RGX_PDUMPREG_NAME,
RGX_CR_SCRATCH11,
PVRSRV_DEVICE_NODE * psDeviceNode,
IMG_UINT32 ui32PDumpFlags)
{
-#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_FBCDC_SIGNATURE_CHECK)
+#if defined(SUPPORT_VALIDATION) && (defined(SUPPORT_FBCDC_SIGNATURE_CHECK) || defined(SUPPORT_TRP))
PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
PVRSRV_ERROR eError;
+#if defined(SUPPORT_FBCDC_SIGNATURE_CHECK)
/*
* Add a PDUMP POLL on the FBC/FBDC signature check status.
*/
OSFreeMem(pszLoopCondition);
}
}
+#endif /* SUPPORT_FBCDC_SIGNATURE_CHECK */
+
+#if defined(SUPPORT_TRP)
+ /*
+ * Add a PDUMP POLL on the TRP signature check status.
+ */
+ if (psDevInfo->ui32ValidationFlags & RGX_VAL_TRP_SIG_CHECK_NOERR_EN)
+ {
+ PDUMPCOMMENT(psDeviceNode, "Verify TRP Signature: match required");
+ eError = PDUMPREGPOL(psDeviceNode,
+ RGX_PDUMPREG_NAME,
+ RGXFWIF_CR_TRP_SIGNATURE_STATUS,
+ RGXFWIF_TRP_STATUS_CHECKSUMS_OK,
+ 0xFFFFFFFF,
+ ui32PDumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+ }
+ else if (psDevInfo->ui32ValidationFlags & RGX_VAL_TRP_SIG_CHECK_ERR_EN)
+ {
+ PDUMPCOMMENT(psDeviceNode, "Verify TRP Signature: mismatch required");
+ eError = PDUMPREGPOL(psDeviceNode,
+ RGX_PDUMPREG_NAME,
+ RGXFWIF_CR_TRP_SIGNATURE_STATUS,
+ RGXFWIF_TRP_STATUS_CHECKSUMS_ERROR,
+ 0xFFFFFFFF,
+ ui32PDumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+ }
+#endif /* SUPPORT_TRP */
+
#else
PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
{
RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
- IMG_UINT64 *paui64StatsCounters;
+ IMG_UINT64 (*paui64DMOSLastWord)[RGXFW_MAX_NUM_OSIDS];
+ IMG_UINT64 (*paaui64DMOSStatsCounters)[RGXFW_MAX_NUM_OSIDS][RGXFWIF_GPU_UTIL_STATE_NUM];
IMG_UINT64 ui64LastPeriod;
IMG_UINT64 ui64LastState;
IMG_UINT64 ui64LastTime;
IMG_UINT64 ui64TimeNow;
+ RGXFWIF_DM eDM;
psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
- paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+ paui64DMOSLastWord = &psUtilFWCb->aaui64DMOSLastWord[0];
+ paaui64DMOSStatsCounters = &psUtilFWCb->aaaui64DMOSStatsCounters[0];
OSLockAcquire(psDevInfo->hGPUUtilLock);
ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode));
/* Update counters to account for the time since the last update */
- ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
- ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64GpuLastWord);
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64GpuLastWord);
ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
- paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+ psUtilFWCb->aui64GpuStatsCounters[ui64LastState] += ui64LastPeriod;
/* Update state and time of the latest update */
- psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+ psUtilFWCb->ui64GpuLastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+ for (eDM = 0; eDM < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; eDM++)
+ {
+ IMG_UINT32 ui32DriverID;
+
+ FOREACH_SUPPORTED_DRIVER(ui32DriverID)
+ {
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]);
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->aaui64DMOSLastWord[eDM][ui32DriverID]);
+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+ paaui64DMOSStatsCounters[eDM][ui32DriverID][ui64LastState] += ui64LastPeriod;
+
+ /* Update state and time of the latest update */
+ paui64DMOSLastWord[eDM][ui32DriverID] = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+ }
+ }
OSLockRelease(psDevInfo->hGPUUtilLock);
}
LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
{
- IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32DriverID;
IMG_BOOL bGuestOnline = IMG_FALSE;
- for (ui32OSid = RGXFW_GUEST_OSID_START;
- ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++)
+ for (ui32DriverID = RGXFW_GUEST_DRIVER_ID_START;
+ ui32DriverID < RGX_NUM_DRIVERS_SUPPORTED; ui32DriverID++)
{
RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE)
- psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState;
+ psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32DriverID].bfOsState;
if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) ||
(eGuestState == RGXFW_CONNECTION_FW_OFFLOADING))
{
bGuestOnline = IMG_TRUE;
- PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid));
+ PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32DriverID));
}
}
#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
/* Guest drivers expect the firmware to have set its end of the
- * connection to Ready state by now. Poll indefinitely otherwise. */
+ * connection to Ready state by now. */
if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
{
PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__));
}
- while (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+
+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US)
{
- OSSleepms(10);
+ if (KM_FW_CONNECTION_IS(READY, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
+ break;
+ }
+ else
+ {
+ OSSleepms(10);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (!KM_FW_CONNECTION_IS(READY, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Ready state.", __func__));
+ return PVRSRV_ERROR_TIMEOUT;
}
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__));
#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */
/* Guests can only access the register holding the connection states,
* after the GPU is confirmed to be powered up */
KM_SET_OS_CONNECTION(READY, psDevInfo);
- OSWriteDeviceMem32(pbUpdatedFlag, IMG_FALSE);
+ OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE);
/* Kick an initial dummy command to make the firmware initialise all
* its internal guest OS data structures and compatibility information.
{
KM_SET_OS_CONNECTION(READY, psDevInfo);
+#if defined(SUPPORT_AUTOVZ)
/* Disable power callbacks that should not be run on virtualised drivers after the GPU
* is fully initialised: system layer pre/post functions and driver idle requests.
* The original device RGX Pre/Post functions are called from this Vz wrapper. */
&RGXVzPrePowerState, &RGXVzPostPowerState,
NULL, NULL, NULL, NULL);
-#if defined(SUPPORT_AUTOVZ)
/* During first-time boot the flag is set here, while subsequent reboots will already
* have set it earlier in RGXInit. Set to true from this point onwards in any case. */
psDeviceNode->bAutoVzFwIsUp = IMG_TRUE;
}
/* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */
- while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ LOOP_UNTIL_TIMEOUT(RGX_VZ_CONNECTION_TIMEOUT_US)
{
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
- OSSleepms(100);
+ if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
+ break;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__));
+ OSSleepms(10);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Timed out waiting for the Firmware to enter Active state.", __func__));
+ return PVRSRV_ERROR_TIMEOUT;
}
- PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__));
/* poll on the Firmware supplying the compatibility data */
LOOP_UNTIL_TIMEOUT(ui32FwTimeout)
return eError;
}
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
/*
* To validate the MTS unit we do the following:
* - Immediately after firmware loading for each OSID
ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS);
- if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS)
+ if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OSIDS)
{
PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:"));
- PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped));
+ PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OSIDS, ui32OsRegBanksMapped));
PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped));
}
ui32OSid,
ui32ScheduleRegister));
OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType);
- OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
#if defined(PDUMP)
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, kicking MTS register %u", ui32OSid);
PDUMP_FLAGS_CONTINUOUS);
#endif
+#if !defined(NO_HARDWARE)
+ OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister);
+
/* Wait test enable bit to be unset */
if (PVRSRVPollForValueKM(psDeviceNode,
- (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest,
+ (volatile IMG_UINT32 __iomem *)&psFwSysInit->ui32OSKickTest,
0,
RGXFWIF_KICK_TEST_ENABLED_BIT,
POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK)
}
PVR_DPF((PVR_DBG_MESSAGE, " PASS"));
+#endif
}
PVR_LOG(("MTS passed sideband tests"));
return PVRSRV_OK;
}
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION_MTS) */
#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
#define SCRATCH_VALUE (0x12345678U)
static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo)
{
void *pvAppHintState = NULL;
- IMG_UINT32 ui32AppHintDefault = 0;
+ const IMG_BOOL bDefaultFalse = IMG_FALSE;
IMG_BOOL bRunRiscvDmiTest;
IMG_UINT32 *pui32FWCode = NULL;
OSCreateKMAppHintState(&pvAppHintState);
OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest,
- &ui32AppHintDefault, &bRunRiscvDmiTest);
+ &bDefaultFalse, &bRunRiscvDmiTest);
OSFreeKMAppHintState(pvAppHintState);
if (bRunRiscvDmiTest == IMG_FALSE)
#endif /* defined(PDUMP) */
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo);
if (eError != PVRSRV_OK)
{
#endif
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
+ PVRSRVSetFirmwareStartTime(psDeviceNode->psPowerDev,
+ psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
#endif
HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal);
sPowUnitsStateMaskChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
sPowUnitsStateMaskChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE;
sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32PowUnitsStateMask = ui32PowUnitsStateMask;
+ sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32RACStateMask = 0;
+
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, POWER_ISLAND_VERSION) >= 3)
+ {
+ sPowUnitsStateMaskChange.uCmdData.sPowData.uPowerReqData.ui32RACStateMask =
+ (1U << psDevInfo->sDevFeatureCfg.ui32MAXRACCount) - 1;
+ }
PDUMPCOMMENT(psDeviceNode,
"Scheduling command to change power units state to 0x%X",
if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
{
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
+ PVRSRVSetFirmwareHandshakeIdleTime(psDeviceNode->psPowerDev,
+ RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
#endif
PDUMPPOWCMDSTART(psDeviceNode);
#include "rgxccb.h"
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"
-#include "htbuffer.h"
#include "sync_server.h"
#include "sync_internal.h"
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
#include "rgxworkest.h"
+#include "rgxworkest_ray.h"
#endif
/* Enable this to dump the compiled list of UFOs prior to kick call */
PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_HANDLE hMemCtxPrivData,
IMG_UINT32 ui32ContextFlags,
IMG_UINT32 ui32StaticRayContextStateSize,
RGX_RDM_CCB_SIZE_LOG2,
RGX_RDM_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
ui32MaxDeadlineMS,
ui64RobustnessAddress,
&sInfo,
goto fail_acquire_cpu_mapping;
}
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstInitRay(psDevInfo, &psRayContext->sWorkEstData);
+ }
+#endif
+
OSDeviceMemCopy(&psFWRayContext->sStaticRayContextState, pStaticRayContextState, ui32StaticRayContextStateSize);
DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS);
DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
PVRSRV_ERROR eError = PVRSRV_OK;
PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ RGXFWIF_FWRAYCONTEXT *psFWRayContext;
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+
+ eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc,
+ (void **)&psFWRayContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map firmware ray context (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ return eError;
+ }
+
+ ui32WorkEstCCBSubmitted = psFWRayContext->ui32WorkEstCCBSubmitted;
+
+ DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
+
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psRayContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+ __func__, ui32WorkEstCCBSubmitted,
+ psRayContext->sWorkEstData.ui32WorkEstCCBReceived));
+
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+#endif
/* Check if the FW has finished with this resource ... */
eError = RGXFWRequestCommonContextCleanUp(psRayContext->psDeviceNode,
DevmemFwUnmapAndFree(psDevInfo, psRayContext->psContextStateMemDesc);
psRayContext->psServerCommonContext = NULL;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstDeInitRay(psDevInfo, &psRayContext->sWorkEstData);
+ }
+#endif
DevmemFwUnmapAndFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
IMG_UINT32 ui32CmdSize,
IMG_PBYTE pui8DMCmd,
IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ExtJobRef)
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32AccStructSizeInBytes,
+ IMG_UINT32 ui32DispatchSize,
+ IMG_UINT64 ui64DeadlineInus)
{
RGXFWIF_KCCB_CMD sRayKCCBCmd;
RGX_CLIENT_CCB *psClientCCB;
IMG_UINT32 ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataRay = {0};
+ IMG_UINT32 ui32RDMWorkloadDataRO = 0;
+ IMG_UINT32 ui32RDMCmdHeaderOffset = 0;
+ IMG_UINT32 ui32RDMCmdOffsetWrapCheck = 0;
+ IMG_UINT32 ui32RDMCmdOffset = 0;
+ RGX_WORKLOAD sWorkloadCharacteristics = {0};
+#endif
+
IMG_BOOL bCCBStateOpen = IMG_FALSE;
IMG_UINT64 ui64FBSCEntryMask;
IMG_UINT32 ui32IntClientFenceCount = 0;
}
}
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ sWorkloadCharacteristics.sRay.ui32AccStructSize = ui32AccStructSizeInBytes;
+ sWorkloadCharacteristics.sRay.ui32DispatchSize = ui32DispatchSize;
+
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRayContext->psDeviceNode->pvDevice,
+ &psRayContext->sWorkEstData,
+ &psRayContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
+ RGXFWIF_CCB_CMD_TYPE_RAY,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickDataRay);
+
+ if (sWorkloadKickDataRay.ui32CyclesPrediction != 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch size = %u, Acc struct size = %u, prediction = %u",
+ __func__,
+ sWorkloadCharacteristics.sRay.ui32DispatchSize,
+ sWorkloadCharacteristics.sRay.ui32AccStructSize,
+ sWorkloadKickDataRay.ui32CyclesPrediction));
+ }
+ }
+#endif
+
RGXCmdHelperInitCmdCCB(psDevInfo,
psClientCCB,
ui64FBSCEntryMask,
ui32IntJobRef,
ui32PDumpFlags,
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- NULL,
+ &sWorkloadKickDataRay,
#else
NULL,
#endif
if (eError == PVRSRV_OK)
{
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ ui32RDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
+ }
+#endif
/*
All the required resources are ready at this point, we can't fail so
take the required server sync operations and commit all the resources
RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "RDM", FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr);
}
- /* Construct the kernel compute CCB command. */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* The following is used to determine the offset of the command header containing
+ * the workload estimation data so that can be accessed when the KCCB is read */
+ ui32RDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
+
+ ui32RDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRayContext->psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and
+ * therefore would start at an offset of 0 rather than the current command
+ * offset */
+ if (ui32RDMCmdOffset < ui32RDMCmdOffsetWrapCheck)
+ {
+ ui32RDMWorkloadDataRO = ui32RDMCmdOffset;
+ }
+ else
+ {
+ ui32RDMWorkloadDataRO = 0;
+ }
+ }
+#endif
+
+ /* Construct the kernel ray CCB command. */
sRayKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
sRayKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext);
sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
sRayKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
sRayKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
- sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced
+ * when the KCCB command reaches the FW */
+ sRayKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32RDMWorkloadDataRO + ui32RDMCmdHeaderOffset;
+ }
+#endif
ui32FWCtx = FWCommonContextGetFWAddress(psRayContext->psServerCommonContext).ui32Addr;
RGXSRV_HWPERF_ENQ(psRayContext,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_RS,
+ RGX_HWPERF_KICK_TYPE2_RS,
iCheckFence,
iUpdateFence,
iUpdateTimeline,
}
else
{
- PVRGpuTraceEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+ PVRGpuTraceEnqueueEvent(psRayContext->psDeviceNode,
ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_RS);
+ RGX_HWPERF_KICK_TYPE2_RS);
}
/*
* Now check eError (which may have returned an error from our earlier call
******************************************************************************/
PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_HANDLE hMemCtxPrivData,
IMG_UINT32 ui32ContextFlags,
IMG_UINT32 ui32StaticRayContextStateSize,
IMG_UINT32 ui32CmdSize,
IMG_PBYTE pui8DMCmd,
IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ExtJobRef);
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32AccStructSizeInBytes,
+ IMG_UINT32 ui32DispatchSize,
+ IMG_UINT64 ui64DeadlineInus);
#endif /* RGXRAY_H_ */
/*
* Services AppHints initialisation
*/
-#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e)
+#define X(a, b, c, d, e, f) SrvInitParamInit ## b(a, d, e)
APPHINT_LIST_ALL
#undef X
#endif /* defined(__linux__) */
FW_PERF_CONF eFirmwarePerf;
RGX_ACTIVEPM_CONF eRGXActivePMConf;
RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+ IMG_BOOL bSPUClockGating;
IMG_BOOL bEnableTrustedDeviceAceConfig;
IMG_UINT32 ui32FWContextSwitchCrossDM;
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableRDPowerIsland, ui32ParamTemp);
psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSPUClockGating, psHints->bSPUClockGating);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FirmwarePerf, ui32ParamTemp);
psHints->eFirmwarePerf = ui32ParamTemp;
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]);
+#if defined(SUPPORT_RAY_TRACING)
+ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskRDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_RDM]);
+#endif
#endif
/*
#if defined(SUPPORT_VALIDATION)
ui32FWConfigFlags |= psHints->bEnablePowUnitsStateMaskChange ? RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN : 0;
ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0;
+ ui32FWConfigFlags |= psHints->bSPUClockGating ? RGXFWIF_INICFG_SPU_CLOCK_GATE : 0;
if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) &&
- ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0)))
+ (psHints->eRGXActivePMConf != 0) )
{
psHints->eRGXActivePMConf = 0;
- psHints->eRGXRDPowerIslandConf = 0;
- PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n"
- "Overriding current value for both with new value 0."));
+ PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with EnableAPM disabled.\n"
+ "Overriding current value with new value 0."));
}
#endif
ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0;
{
void *pvParamState = NULL;
IMG_UINT32 ui32LogType;
- IMG_BOOL bAnyLogGroupConfigured;
IMG_UINT32 ui32BufferSize;
IMG_UINT32 ui32OpMode;
return;
SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE, pvParamState, EnableHTBLogGroup, ui32LogType);
- bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE, pvParamState, HTBOperationMode, ui32OpMode);
SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HTBufferSizeInKB, ui32BufferSize);
PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap];
PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap);
PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE
- | PHYS_HEAP_USAGE_GPU_SECURE);
+ | PHYS_HEAP_USAGE_GPU_SECURE | PHYS_HEAP_USAGE_FW_PRIVATE);
PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0,
PVRSRV_ERROR_NOT_SUPPORTED,
psDeviceNode->psDevConfig->pszName);
PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)",
psDeviceNode->sDevId.ui32InternalID,
- psDeviceNode->sDevId.i32OsDeviceID);
+ psDeviceNode->sDevId.i32KernelDeviceID);
if (psDeviceNode->psDevConfig->pszVersion)
{
RGXInitMultiCoreInfo(psDeviceNode);
-#if defined(PDUMP)
- eError = DevmemIntAllocDefBackingPage(psDeviceNode,
- &psDeviceNode->sDummyPage,
- PVR_DUMMY_PAGE_INIT_VALUE,
- DUMMY_PAGE,
- IMG_TRUE);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__));
- goto cleanup;
- }
- eError = DevmemIntAllocDefBackingPage(psDeviceNode,
- &psDeviceNode->sDevZeroPage,
- PVR_ZERO_PAGE_INIT_VALUE,
- DEV_ZERO_PAGE,
- IMG_TRUE);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__));
- goto cleanup;
- }
-#endif
-
sLayerParams.psDevInfo = psDevInfo;
#if defined(SUPPORT_TRUSTED_DEVICE)
if ((sApphints.bEnableTrustedDeviceAceConfig) &&
(RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)))
{
- SetTrustedDeviceAceEnabled();
+ SetTrustedDeviceAceEnabled(psDeviceNode->psDevConfig->hSysData);
}
#endif
#endif
#endif
ui32HWPerfBlkSize,
sApphints.eRGXRDPowerIslandConf,
+ sApphints.bSPUClockGating,
sApphints.eFirmwarePerf,
sApphints.ui32KCCBSizeLog2,
ui32FWConfigFlagsExt,
{
IMG_DEV_PHYADDR sPCAddr;
IMG_UINT32 uiPCAddr;
+ IMG_UINT32 ui32CBaseMapCtxReg;
+ RGX_LAYER_PARAMS *psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psParams->psDevInfo;
/*
* Acquire the address of the Kernel Page Catalogue.
*/
RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
- uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
- << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
- & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
- /*
- * Write the kernel catalogue base.
- */
- RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT)
+ & ~RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_CLRMSK);
+
+ /*
+ * Write the kernel catalogue base.
+ */
+ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT__HOST_SECURITY_GT1_AND_MH_PASID_WIDTH_LT6_AND_MMU_GE4;
- /* Set the mapping context */
- RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV);
- (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
+ /* Set the mapping context */
+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV);
+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */
- /* Write the cat-base address */
- RGXWriteKernelMMUPC32(hPrivate,
- RGX_CR_MMU_CBASE_MAPPING,
- RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
- RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
- uiPCAddr);
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT,
+ uiPCAddr);
#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV)
- /* Set-up different MMU ID mapping to the same PC used above */
- RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF);
- (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */
-
- RGXWriteKernelMMUPC32(hPrivate,
- RGX_CR_MMU_CBASE_MAPPING,
- RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
- RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
- uiPCAddr);
+ /* Set-up different MMU ID mapping to the same PC used above */
+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWIF);
+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */
+
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING__HOST_SECURITY_GT1__BASE_ADDR_SHIFT,
+ uiPCAddr);
#endif
+ }
+ else
+ {
+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+ & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+
+ /*
+ * Write the kernel catalogue base.
+ */
+ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+ ui32CBaseMapCtxReg = RGX_CR_MMU_CBASE_MAPPING_CONTEXT;
+
+ /* Set the mapping context */
+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWPRIV);
+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */
+
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+
+#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV)
+ /* Set-up different MMU ID mapping to the same PC used above */
+ RGXWriteReg32(hPrivate, ui32CBaseMapCtxReg, MMU_CONTEXT_MAPPING_FWIF);
+ (void)RGXReadReg32(hPrivate, ui32CBaseMapCtxReg); /* Fence write */
+
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+#endif
+ }
}
static void RGXResetSequence(const void *hPrivate, const IMG_CHAR *pcRGXFW_PROCESSOR)
{
/* Set RGX in soft-reset */
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR))
+ {
+ RGXCommentLog(hPrivate, "RGXStart: soft reset cpu core");
+ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 0);
+ }
+
RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
RGXSPUSoftResetAssert(hPrivate);
{
return eError;
}
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
+ {
+ /* Set OR reduce for ECC faults to ensure faults are not missed during early boot stages */
+ RGXWriteReg32(hPrivate, RGX_CR_MULTICORE_EVENT_REDUCE, RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_FW_EN | RGX_CR_MULTICORE_EVENT_REDUCE_FAULT_GPU_EN);
+ }
+
+ /* Route fault events to the host */
+ RGXWriteReg32(hPrivate, RGX_CR_EVENT_ENABLE, RGX_CR_EVENT_ENABLE_FAULT_FW_EN);
}
if (RGX_DEVICE_HAS_BRN(hPrivate, BRN_66927))
* threads to avoid a race condition).
* This is only really needed for PDumps but we do it anyway driver-live.
*/
- RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES, 0x0);
- (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__META_REGISTER_UNPACKED_ACCESSES); /* Fence write */
-
- eError = RGXPollReg32(hPrivate,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN,
- RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__READY_EN
- | RGX_CR_META_SP_MSLVCTRL1__META_REGISTER_UNPACKED_ACCESSES__GBLPORT_IDLE_EN);
+ if (RGX_GET_FEATURE_VALUE(psDevInfo, HOST_SECURITY_VERSION) > 1)
+ {
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED, 0x0);
+ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_GT1_AND_METAREG_UNPACKED); /* Fence write */
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_GT1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ }
+ else
+ {
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED, 0x0);
+ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS__HOST_SECURITY_V1_AND_METAREG_UNPACKED); /* Fence write */
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__READY_EN
+ | RGX_CR_META_SP_MSLVCTRL1__HOST_SECURITY_V1_AND_METAREG_UNPACKED__GBLPORT_IDLE_EN);
+ }
}
else
{
#include "rgxhwperf.h"
#include "ospvr_gputrace.h"
#include "rgxsyncutils.h"
-#include "htbuffer.h"
+#include "htbserver.h"
#include "rgxdefs_km.h"
#include "rgx_fwif_km.h"
typedef struct {
DEVMEM_MEMDESC *psContextStateMemDesc;
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} RGX_SERVER_RC_TA_DATA;
typedef struct {
DEVMEM_MEMDESC *psContextStateMemDesc;
RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
- IMG_UINT32 ui32Priority;
+ IMG_INT32 i32Priority;
} RGX_SERVER_RC_3D_DATA;
struct _RGX_SERVER_RENDER_CONTEXT_ {
{
uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
}
+
eError = PhysmemNewRamBackedPMR(psFreeList->psConnection,
psFreeList->psDevInfo->psDeviceNode,
uiSize,
- uiSize,
1,
1,
&ui32MappingTable,
RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
- PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_PRIVATE),
sizeof(szAllocName),
szAllocName,
psFreeList->ownerPid,
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Update Stats */
- PVRSRVStatsUpdateFreelistStats(0,
+ PVRSRVStatsUpdateFreelistStats(psDevInfo->psDeviceNode,
+ 0,
1, /* Add 1 to the appropriate counter (Requests by FW) */
psFreeList->ui32InitFLPages,
psFreeList->ui32NumHighPages,
*/
PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet)
{
- PVRSRV_RGXDEV_INFO *psDevInfo;
PVRSRV_DEVICE_NODE *psDevNode;
PVRSRV_ERROR eError;
PRGXFWIF_HWRTDATA psHWRTData;
PVR_ASSERT(psKMHWRTDataSet);
psDevNode = psKMHWRTDataSet->psDeviceNode;
- psDevInfo = psDevNode->pvDevice;
eError = RGXSetFirmwareAddress(&psHWRTData,
psKMHWRTDataSet->psHWRTDataFwMemDesc, 0,
}
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
/* Update Stats */
- PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ PVRSRVStatsUpdateFreelistStats(psDeviceNode,
+ 1, /* Add 1 to the appropriate counter (Requests by App)*/
0,
psFreeList->ui32InitFLPages,
psFreeList->ui32NumHighPages,
psZSBuffer->ui32NumReqByApp++;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
+ PVRSRVStatsUpdateZSBufferStats(psZSBuffer->psDevInfo->psDeviceNode,
+ 1, 0, psZSBuffer->owner);
#endif
/* Do the backing */
psZSBuffer->ui32NumReqByFW++;
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
- PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner);
+ PVRSRVStatsUpdateZSBufferStats(psDevInfo->psDeviceNode,
+ 0, 1, psZSBuffer->owner);
#endif
}
DEVMEM_MEMDESC *psAllocatedMemDesc,
IMG_UINT32 ui32AllocatedOffset,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
ui32MaxDeadlineMS,
ui64RobustnessAddress,
psInfo,
PDUMP_FLAGS_CONTINUOUS);
#endif
- psTAData->ui32Priority = ui32Priority;
+ psTAData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_tacommoncontext:
DEVMEM_MEMDESC *psAllocatedMemDesc,
IMG_UINT32 ui32AllocatedOffset,
DEVMEM_MEMDESC *psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32MaxDeadlineMS,
IMG_UINT64 ui64RobustnessAddress,
RGX_COMMON_CONTEXT_INFO *psInfo,
ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2,
ui32ContextFlags,
- ui32Priority,
+ i32Priority,
ui32MaxDeadlineMS,
ui64RobustnessAddress,
psInfo,
sizeof(RGXFWIF_3DCTX_STATE),
PDUMP_FLAGS_CONTINUOUS);
- ps3DData->ui32Priority = ui32Priority;
+ ps3DData->i32Priority = i32Priority;
return PVRSRV_OK;
fail_3dcommoncontext:
*/
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32FrameworkRegisterSize,
IMG_PBYTE pabyFrameworkRegisters,
IMG_HANDLE hMemCtxPrivData,
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ }
#endif
if (ui32FrameworkRegisterSize)
psRenderContext->psFWRenderContextMemDesc,
offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
psFWMemContextMemDesc,
- ui32Priority,
+ i32Priority,
ui32Max3DDeadlineMS,
ui64RobustnessAddress,
&sInfo,
psRenderContext->psFWRenderContextMemDesc,
offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
psFWMemContextMemDesc,
- ui32Priority,
+ i32Priority,
ui32MaxTADeadlineMS,
ui64RobustnessAddress,
&sInfo,
}
OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize);
+#if defined(SUPPORT_TRP)
+ psFWRenderContext->eTRPGeomCoreAffinity = RGXFWIF_DM_MAX;
+#endif
DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS);
DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
{
PVRSRV_ERROR eError;
PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
- IMG_UINT32 ui32WorkEstCCBSubmitted;
-#endif
/* remove node from list before calling destroy - as destroy, if successful
* will invalidate the node
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
- (void **)&psFWRenderContext);
- if (eError != PVRSRV_OK)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to map firmware render context (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto e0;
- }
+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
- ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map firmware render context (%s)",
+ __func__,
+ PVRSRVGetErrorString(eError)));
+ goto e0;
+ }
- DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
- /* Check if all of the workload estimation CCB commands for this workload are read */
- if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
- {
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
- PVR_DPF((PVR_DBG_WARNING,
- "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
- __func__, ui32WorkEstCCBSubmitted,
- psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
+ __func__, ui32WorkEstCCBSubmitted,
+ psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
- eError = PVRSRV_ERROR_RETRY;
- goto e0;
+ eError = PVRSRV_ERROR_RETRY;
+ goto e0;
+ }
}
#endif
SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData);
+ }
#endif
OSLockDestroy(psRenderContext->hLock);
IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd;
- IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0;
-
- IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
-
RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData;
RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData;
IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D);
+ IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0;
+
+ IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+
PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE;
PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE;
if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
{
- PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL;
- PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL;
-
CHKPT_DBG((PVR_DBG_ERROR,
"%s: [TA] iCheckFence = %d, iUpdateTimeline = %d",
__func__, iCheckTAFence, iUpdateTATimeline));
(void*)psTAFenceTimelineUpdateSync,
ui32TAFenceTimelineUpdateValue));
- /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
- pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
- CHKPT_DBG((PVR_DBG_ERROR,
- "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
- __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL;
+
+ /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+ pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR,
+ "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x",
+ __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr));
+ }
+#endif
}
/* Append the sync prim update for the TA timeline (if required) */
(void*)ps3DFenceTimelineUpdateSync,
ui323DFenceTimelineUpdateValue));
- /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
- pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
- CHKPT_DBG((PVR_DBG_ERROR,
- "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
- __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL;
+
+ /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+ pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR,
+ "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x",
+ __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr));
+ }
+#endif
}
/* Append the sync prim update for the 3D timeline (if required) */
}
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- if (bKickTA || bKick3D || bAbort)
+ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && (bKickTA || bKick3D || bAbort))
{
sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize;
sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls;
RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Prepare workload estimation */
- WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
- &psRenderContext->sWorkEstData,
- &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
- RGXFWIF_CCB_CMD_TYPE_GEOM,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickDataTA);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA,
+ RGXFWIF_CCB_CMD_TYPE_GEOM,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickDataTA);
+ }
#endif
/* Init the TA command helper */
const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D;
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Prepare workload estimation */
- WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
- &psRenderContext->sWorkEstData,
- &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
- e3DCmdType,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickData3D);
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D,
+ e3DCmdType,
+ &sWorkloadCharacteristics,
+ ui64DeadlineInus,
+ &sWorkloadKickData3D);
+ }
#endif
/* Init the 3D command helper */
FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
-
- /* This checks if the command would wrap around at the end of the CCB and therefore would start at an
- offset of 0 rather than the current command offset */
- if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- ui32TACommandOffset = ui32TACmdOffset;
- }
- else
- {
- ui32TACommandOffset = 0;
+ ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and therefore would start at an
+ offset of 0 rather than the current command offset */
+ if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ {
+ ui32TACommandOffset = ui32TACmdOffset;
+ }
+ else
+ {
+ ui32TACommandOffset = 0;
+ }
}
#endif
}
FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
-
- if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
{
- ui323DCommandOffset = ui323DCmdOffset;
- }
- else
- {
- ui323DCommandOffset = 0;
+ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+ {
+ ui323DCommandOffset = ui323DCmdOffset;
+ }
+ else
+ {
+ ui323DCommandOffset = 0;
+ }
}
#endif
}
sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Add the Workload data into the KCCB kick */
- sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
-#else
- sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Add the Workload data into the KCCB kick */
+ sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+ }
#endif
eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TA,
+ RGX_HWPERF_KICK_TYPE2_GEOM,
iCheckTAFence,
iUpdateTAFence,
iUpdateTATimeline,
goto fail_taacquirecmd;
}
- PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode,
ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TA);
+ RGX_HWPERF_KICK_TYPE2_GEOM);
}
if (ui323DCmdCount)
/* Add the Workload data into the KCCB kick */
#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
- s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
-#else
- s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ if (!PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+ s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+ }
#endif
eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl,
ui32FWCtx,
ui32ExtJobRef,
ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_3D,
+ RGX_HWPERF_KICK_TYPE2_3D,
iCheck3DFence,
iUpdate3DFence,
iUpdate3DTimeline,
goto fail_3dacquirecmd;
}
- PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode,
ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_3D);
+ RGX_HWPERF_KICK_TYPE2_3D);
}
/*
PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDeviceNode,
RGX_SERVER_RENDER_CONTEXT *psRenderContext,
- IMG_UINT32 ui32Priority)
+ IMG_INT32 i32Priority)
{
PVRSRV_ERROR eError;
OSLockAcquire(psRenderContext->hLock);
- if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+ if (psRenderContext->sTAData.i32Priority != i32Priority)
{
eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
psConnection,
psRenderContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_GEOM);
if (eError != PVRSRV_OK)
{
__func__, PVRSRVGetErrorString(eError)));
goto fail_tacontext;
}
- psRenderContext->sTAData.ui32Priority = ui32Priority;
+ psRenderContext->sTAData.i32Priority = i32Priority;
}
- if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+ if (psRenderContext->s3DData.i32Priority != i32Priority)
{
eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
psConnection,
psRenderContext->psDeviceNode->pvDevice,
- ui32Priority,
+ i32Priority,
RGXFWIF_DM_3D);
if (eError != PVRSRV_OK)
{
__func__, PVRSRVGetErrorString(eError)));
goto fail_3dcontext;
}
- psRenderContext->s3DData.ui32Priority = ui32Priority;
+ psRenderContext->s3DData.i32Priority = i32Priority;
}
OSLockRelease(psRenderContext->hLock);
IMG_UINT32 ui32RefCount;
IMG_BOOL bOnDemand;
- IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */
- IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */
+ IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */
+ IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */
IMG_PID owner;
Server-side implementation of RGXCreateRenderContext
@Input pvDeviceNode - device node
- @Input ui32Priority - context priority
+ @Input i32Priority - context priority
@Input hMemCtxPrivData - memory context private data
@Input ui32PackedCCBSizeU8888 :
ui8TACCBAllocSizeLog2 - TA CCB size
******************************************************************************/
PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE *psDeviceNode,
- IMG_UINT32 ui32Priority,
+ IMG_INT32 i32Priority,
IMG_UINT32 ui32FrameworkCommandSize,
IMG_PBYTE pabyFrameworkCommand,
IMG_HANDLE hMemCtxPrivData,
PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
PVRSRV_DEVICE_NODE * psDevNode,
RGX_SERVER_RENDER_CONTEXT *psRenderContext,
- IMG_UINT32 ui32Priority);
+ IMG_INT32 i32Priority);
PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
RGX_CONTEXT_PROPERTY eContextProperty,
+++ /dev/null
-/*************************************************************************/ /*!
-@File rgxtdmtransfer.c
-@Title Device specific TDM transfer queue routines
-@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
-@Description Device specific functions
-@License Dual MIT/GPLv2
-
-The contents of this file are subject to the MIT license as set out below.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-Alternatively, the contents of this file may be used under the terms of
-the GNU General Public License Version 2 ("GPL") in which case the provisions
-of GPL are applicable instead of those above.
-
-If you wish to allow use of your version of this file only under the terms of
-GPL, and not to allow others to use your version of this file under the terms
-of the MIT license, indicate your decision by deleting the provisions above
-and replace them with the notice and other provisions required by GPL as set
-out in the file called "GPL-COPYING" included in this distribution. If you do
-not delete the provisions above, a recipient may use your version of this file
-under the terms of either the MIT license or GPL.
-
-This License is also included in this distribution in the file called
-"MIT-COPYING".
-
-EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
-PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
-BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
-PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/ /**************************************************************************/
-
-#include "pdump_km.h"
-#include "rgxdevice.h"
-#include "rgxccb.h"
-#include "rgxutils.h"
-#include "rgxfwutils.h"
-#include "rgxtdmtransfer.h"
-#include "rgx_tq_shared.h"
-#include "rgxmem.h"
-#include "allocmem.h"
-#include "devicemem.h"
-#include "devicemem_pdump.h"
-#include "osfunc.h"
-#include "pvr_debug.h"
-#include "pvrsrv.h"
-#include "rgx_memallocflags.h"
-#include "rgxhwperf.h"
-#include "ospvr_gputrace.h"
-#include "htbuffer.h"
-#include "rgxshader.h"
-
-#include "pdump_km.h"
-
-#include "sync_server.h"
-#include "sync_internal.h"
-#include "sync.h"
-
-#if defined(SUPPORT_BUFFER_SYNC)
-#include "pvr_buffer_sync.h"
-#endif
-
-#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER)
-#include "validation_soc.h"
-#endif
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
-#include "rgxworkest.h"
-#endif
-
-#include "rgxtimerquery.h"
-
-/* Enable this to dump the compiled list of UFOs prior to kick call */
-#define ENABLE_TDM_UFO_DUMP 0
-
-//#define TDM_CHECKPOINT_DEBUG 1
-
-#if defined(TDM_CHECKPOINT_DEBUG)
-#define CHKPT_DBG(X) PVR_DPF(X)
-#else
-#define CHKPT_DBG(X)
-#endif
-
-typedef struct {
- RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
- IMG_UINT32 ui32Priority;
-#if defined(SUPPORT_BUFFER_SYNC)
- struct pvr_buffer_sync_context *psBufferSyncContext;
-#endif
-} RGX_SERVER_TQ_TDM_DATA;
-
-
-struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
- PVRSRV_DEVICE_NODE *psDeviceNode;
- DEVMEM_MEMDESC *psFWTransferContextMemDesc;
- DEVMEM_MEMDESC *psFWFrameworkMemDesc;
- IMG_UINT32 ui32Flags;
- RGX_SERVER_TQ_TDM_DATA sTDMData;
- DLLIST_NODE sListNode;
- SYNC_ADDR_LIST sSyncAddrListFence;
- SYNC_ADDR_LIST sSyncAddrListUpdate;
- POS_LOCK hLock;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WORKEST_HOST_DATA sWorkEstData;
-#endif
-};
-
-static PVRSRV_ERROR _CreateTDMTransferContext(
- CONNECTION_DATA * psConnection,
- PVRSRV_DEVICE_NODE * psDeviceNode,
- DEVMEM_MEMDESC * psAllocatedMemDesc,
- IMG_UINT32 ui32AllocatedOffset,
- SERVER_MMU_CONTEXT * psServerMMUContext,
- DEVMEM_MEMDESC * psFWMemContextMemDesc,
- IMG_UINT32 ui32Priority,
- RGX_COMMON_CONTEXT_INFO * psInfo,
- RGX_SERVER_TQ_TDM_DATA * psTDMData,
- IMG_UINT32 ui32CCBAllocSizeLog2,
- IMG_UINT32 ui32CCBMaxAllocSizeLog2,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT64 ui64RobustnessAddress)
-{
- PVRSRV_ERROR eError;
-
-#if defined(SUPPORT_BUFFER_SYNC)
- psTDMData->psBufferSyncContext =
- pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
- "rogue-tdm");
- if (IS_ERR(psTDMData->psBufferSyncContext))
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: failed to create buffer_sync context (err=%ld)",
- __func__, PTR_ERR(psTDMData->psBufferSyncContext)));
-
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto fail_buffer_sync_context_create;
- }
-#endif
-
- eError = FWCommonContextAllocate(
- psConnection,
- psDeviceNode,
- REQ_TYPE_TQ_TDM,
- RGXFWIF_DM_TDM,
- psServerMMUContext,
- psAllocatedMemDesc,
- ui32AllocatedOffset,
- psFWMemContextMemDesc,
- NULL,
- ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2,
- ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
- ui32ContextFlags,
- ui32Priority,
- UINT_MAX, /* max deadline MS */
- ui64RobustnessAddress,
- psInfo,
- &psTDMData->psServerCommonContext);
- if (eError != PVRSRV_OK)
- {
- goto fail_contextalloc;
- }
-
- psTDMData->ui32Priority = ui32Priority;
- return PVRSRV_OK;
-
-fail_contextalloc:
-#if defined(SUPPORT_BUFFER_SYNC)
- pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
- psTDMData->psBufferSyncContext = NULL;
-fail_buffer_sync_context_create:
-#endif
- PVR_ASSERT(eError != PVRSRV_OK);
- return eError;
-}
-
-
-static PVRSRV_ERROR _DestroyTDMTransferContext(
- RGX_SERVER_TQ_TDM_DATA * psTDMData,
- PVRSRV_DEVICE_NODE * psDeviceNode)
-{
- PVRSRV_ERROR eError;
-
- /* Check if the FW has finished with this resource ... */
- eError = RGXFWRequestCommonContextCleanUp(
- psDeviceNode,
- psTDMData->psServerCommonContext,
- RGXFWIF_DM_TDM,
- PDUMP_FLAGS_CONTINUOUS);
- if (eError == PVRSRV_ERROR_RETRY)
- {
- return eError;
- }
- else if (eError != PVRSRV_OK)
- {
- PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- return eError;
- }
-
- /* ... it has so we can free it's resources */
- FWCommonContextFree(psTDMData->psServerCommonContext);
-
-#if defined(SUPPORT_BUFFER_SYNC)
- pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
- psTDMData->psBufferSyncContext = NULL;
-#endif
-
- return PVRSRV_OK;
-}
-
-/*
- * PVRSRVCreateTransferContextKM
- */
-PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
- CONNECTION_DATA * psConnection,
- PVRSRV_DEVICE_NODE * psDeviceNode,
- IMG_UINT32 ui32Priority,
- IMG_UINT32 ui32FrameworkCommandSize,
- IMG_PBYTE pabyFrameworkCommand,
- IMG_HANDLE hMemCtxPrivData,
- IMG_UINT32 ui32PackedCCBSizeU88,
- IMG_UINT32 ui32ContextFlags,
- IMG_UINT64 ui64RobustnessAddress,
- RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
-{
- RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
-
- DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
- PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
- RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- /* Allocate the server side structure */
- *ppsTransferContext = NULL;
- psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
- if (psTransferContext == NULL)
- {
- return PVRSRV_ERROR_OUT_OF_MEMORY;
- }
-
- /*
- Create the FW transfer context, this has the TDM common
- context embedded within it
- */
- eError = DevmemFwAllocate(psDevInfo,
- sizeof(RGXFWIF_FWTDMCONTEXT),
- RGX_FWCOMCTX_ALLOCFLAGS,
- "FwTransferContext",
- &psTransferContext->psFWTransferContextMemDesc);
- if (eError != PVRSRV_OK)
- {
- goto fail_fwtransfercontext;
- }
-
- eError = OSLockCreate(&psTransferContext->hLock);
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_lockcreate;
- }
-
- psTransferContext->psDeviceNode = psDeviceNode;
-
- if (ui32FrameworkCommandSize)
- {
- eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
- &psTransferContext->psFWFrameworkMemDesc,
- ui32FrameworkCommandSize);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to allocate firmware GPU framework state (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_frameworkcreate;
- }
-
- /* Copy the Framework client data into the framework buffer */
- eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode,
- psTransferContext->psFWFrameworkMemDesc,
- pabyFrameworkCommand,
- ui32FrameworkCommandSize);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to populate the framework buffer (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- goto fail_frameworkcopy;
- }
- sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
- }
-
- eError = _CreateTDMTransferContext(psConnection,
- psDeviceNode,
- psTransferContext->psFWTransferContextMemDesc,
- offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext),
- hMemCtxPrivData,
- psFWMemContextMemDesc,
- ui32Priority,
- &sInfo,
- &psTransferContext->sTDMData,
- U32toU8_Unpack1(ui32PackedCCBSizeU88),
- U32toU8_Unpack2(ui32PackedCCBSizeU88),
- ui32ContextFlags,
- ui64RobustnessAddress);
- if (eError != PVRSRV_OK)
- {
- goto fail_tdmtransfercontext;
- }
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
-#endif
-
- SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
- SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
-
- OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
- dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
- OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
- *ppsTransferContext = psTransferContext;
-
- return PVRSRV_OK;
-
-fail_tdmtransfercontext:
-fail_frameworkcopy:
- if (psTransferContext->psFWFrameworkMemDesc != NULL)
- {
- DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
- }
-fail_frameworkcreate:
- OSLockDestroy(psTransferContext->hLock);
-fail_lockcreate:
- DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
-fail_fwtransfercontext:
- OSFreeMem(psTransferContext);
- PVR_ASSERT(eError != PVRSRV_OK);
- *ppsTransferContext = NULL;
- return eError;
-}
-
-PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM(
- CONNECTION_DATA * psConnection,
- PVRSRV_DEVICE_NODE * psDeviceNode,
- PMR ** ppsCLIPMRMem,
- PMR ** ppsUSCPMRMem)
-{
- PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem);
-
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem)
-{
- PVR_UNREFERENCED_PARAMETER(psPMRMem);
-
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
-{
- PVRSRV_ERROR eError;
- PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_FWTDMCONTEXT *psFWTransferContext;
- IMG_UINT32 ui32WorkEstCCBSubmitted;
-
- eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc,
- (void **)&psFWTransferContext);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to map firmware transfer context (%s)",
- __func__,
- PVRSRVGetErrorString(eError)));
- return eError;
- }
-
- ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted;
-
- DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc);
-
- /* Check if all of the workload estimation CCB commands for this workload are read */
- if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)
- {
- PVR_DPF((PVR_DBG_WARNING,
- "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
- __func__, ui32WorkEstCCBSubmitted,
- psTransferContext->sWorkEstData.ui32WorkEstCCBReceived));
-
- return PVRSRV_ERROR_RETRY;
- }
-#endif
-
-
- /* remove node from list before calling destroy - as destroy, if successful
- * will invalidate the node
- * must be re-added if destroy fails
- */
- OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
- dllist_remove_node(&(psTransferContext->sListNode));
- OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
-
-
- eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
- psTransferContext->psDeviceNode);
- if (eError != PVRSRV_OK)
- {
- goto fail_destroyTDM;
- }
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData);
-#endif
-
- SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
- SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
-
- if (psTransferContext->psFWFrameworkMemDesc != NULL)
- {
- DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
- }
- DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc);
-
- OSLockDestroy(psTransferContext->hLock);
-
- OSFreeMem(psTransferContext);
-
- return PVRSRV_OK;
-
-fail_destroyTDM:
-
- OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
- dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
- OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
- PVR_ASSERT(eError != PVRSRV_OK);
- return eError;
-}
-
-
-/*
- * PVRSRVSubmitTQ3DKickKM
- */
-PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
- RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
- IMG_UINT32 ui32PDumpFlags,
- IMG_UINT32 ui32ClientUpdateCount,
- SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock,
- IMG_UINT32 * paui32ClientUpdateSyncOffset,
- IMG_UINT32 * paui32ClientUpdateValue,
- PVRSRV_FENCE iCheckFence,
- PVRSRV_TIMELINE iUpdateTimeline,
- PVRSRV_FENCE * piUpdateFence,
- IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
- IMG_UINT32 ui32FWCommandSize,
- IMG_UINT8 * pui8FWCommand,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32SyncPMRCount,
- IMG_UINT32 * paui32SyncPMRFlags,
- PMR ** ppsSyncPMRs,
- IMG_UINT32 ui32TDMCharacteristic1,
- IMG_UINT32 ui32TDMCharacteristic2,
- IMG_UINT64 ui64DeadlineInus)
-{
- PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
- RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
- PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL;
- PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL;
- IMG_UINT32 ui32IntClientFenceCount = 0;
- IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue;
- IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount;
- PVRSRV_ERROR eError;
- PVRSRV_ERROR eError2;
- PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
- PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext);
- RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext);
- IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
-
- IMG_UINT64 ui64FBSCEntryMask;
-
- IMG_UINT32 ui32CmdOffset = 0;
- IMG_BOOL bCCBStateOpen;
-
- PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
- PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
- PRGXFWIF_UFO_ADDR pRMWUFOAddr;
-
- IMG_UINT64 uiCheckFenceUID = 0;
- IMG_UINT64 uiUpdateFenceUID = 0;
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0};
- IMG_UINT32 ui32TDMWorkloadDataRO = 0;
- IMG_UINT32 ui32TDMCmdHeaderOffset = 0;
- IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0;
- RGX_WORKLOAD sWorkloadCharacteristics = {0};
-#endif
-
-#if defined(SUPPORT_BUFFER_SYNC)
- struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
- PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
- IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
- PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
-#endif
-
- PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
- PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
- IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
- IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
- PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
- IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
- void *pvUpdateFenceFinaliseData = NULL;
-
- if (iUpdateTimeline >= 0 && !piUpdateFence)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
-
-#if !defined(SUPPORT_WORKLOAD_ESTIMATION)
- PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1);
- PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2);
- PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus);
-#endif
-
- /* Ensure we haven't been given a null ptr to
- * update values if we have been told we
- * have updates
- */
- if (ui32ClientUpdateCount > 0)
- {
- PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL,
- "paui32ClientUpdateValue NULL but "
- "ui32ClientUpdateCount > 0",
- PVRSRV_ERROR_INVALID_PARAMS);
- }
-
- /* Ensure the string is null-terminated (Required for safety) */
- szUpdateFenceName[31] = '\0';
-
- if (ui32SyncPMRCount != 0)
- {
- if (!ppsSyncPMRs)
- {
- return PVRSRV_ERROR_INVALID_PARAMS;
- }
- }
-
- OSLockAcquire(psTransferContext->hLock);
-
- /* We can't allocate the required amount of stack space on all consumer architectures */
- psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
- if (psCmdHelper == NULL)
- {
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto fail_allochelper;
- }
-
-
- /*
- Init the command helper commands for all the prepares
- */
- {
- IMG_CHAR *pszCommandName;
- RGXFWIF_CCB_CMD_TYPE eType;
-#if defined(SUPPORT_BUFFER_SYNC)
- struct pvr_buffer_sync_context *psBufferSyncContext;
-#endif
-
- pszCommandName = "TQ-TDM";
-
- if (ui32FWCommandSize == 0)
- {
- /* A NULL CMD for TDM is used to append updates to a non finished
- * FW command. bCCBStateOpen is used in case capture range is
- * entered on this command, to not drain CCB up to the Roff for this
- * command, but the finished command prior to this.
- */
- bCCBStateOpen = IMG_TRUE;
- eType = RGXFWIF_CCB_CMD_TYPE_NULL;
- }
- else
- {
- bCCBStateOpen = IMG_FALSE;
- eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
- }
-
-#if defined(SUPPORT_BUFFER_SYNC)
- psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
-#endif
-
- eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
- 0,
- NULL,
- NULL);
- if (eError != PVRSRV_OK)
- {
- goto fail_populate_sync_addr_list;
- }
-
- eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
- ui32ClientUpdateCount,
- pauiClientUpdateUFODevVarBlock,
- paui32ClientUpdateSyncOffset);
- if (eError != PVRSRV_OK)
- {
- goto fail_populate_sync_addr_list;
- }
- paui32IntUpdateValue = paui32ClientUpdateValue;
- pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
-
-
- if (ui32SyncPMRCount)
- {
-#if defined(SUPPORT_BUFFER_SYNC)
- int err;
-
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
- err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
- psTransferContext->psDeviceNode->hSyncCheckpointContext,
- ui32SyncPMRCount,
- ppsSyncPMRs,
- paui32SyncPMRFlags,
- &ui32BufferFenceSyncCheckpointCount,
- &apsBufferFenceSyncCheckpoints,
- &psBufferUpdateSyncCheckpoint,
- &psBufferSyncData);
- if (err)
- {
- switch (err)
- {
- case -EINTR:
- eError = PVRSRV_ERROR_RETRY;
- break;
- case -ENOMEM:
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- break;
- default:
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- break;
- }
-
- if (eError != PVRSRV_ERROR_RETRY)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError)));
- }
- goto fail_resolve_input_fence;
- }
-
- /* Append buffer sync fences */
- if (ui32BufferFenceSyncCheckpointCount > 0)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
- SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
- ui32BufferFenceSyncCheckpointCount,
- apsBufferFenceSyncCheckpoints);
- if (!pauiIntFenceUFOAddress)
- {
- pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
- }
- ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
- }
-
- if (psBufferUpdateSyncCheckpoint)
- {
- /* Append the update (from output fence) */
- SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
- 1,
- &psBufferUpdateSyncCheckpoint);
- if (!pauiIntUpdateUFOAddress)
- {
- pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
- }
- ui32IntClientUpdateCount++;
- }
-#else /* defined(SUPPORT_BUFFER_SYNC) */
- PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto fail_populate_sync_addr_list;
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
- }
-
- /* Resolve the sync checkpoints that make up the input fence */
- eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
- iCheckFence,
- &ui32FenceSyncCheckpointCount,
- &apsFenceSyncCheckpoints,
- &uiCheckFenceUID,
- ui32PDumpFlags);
- if (eError != PVRSRV_OK)
- {
- goto fail_resolve_input_fence;
- }
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 ii;
- for (ii=0; ii<32; ii++)
- {
- PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
- CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
- }
- }
-#endif
- /* Create the output fence (if required) */
- if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
- {
- eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
- szUpdateFenceName,
- iUpdateTimeline,
- psTransferContext->psDeviceNode->hSyncCheckpointContext,
- &iUpdateFence,
- &uiUpdateFenceUID,
- &pvUpdateFenceFinaliseData,
- &psUpdateSyncCheckpoint,
- (void*)&psFenceTimelineUpdateSync,
- &ui32FenceTimelineUpdateValue,
- ui32PDumpFlags);
- if (eError != PVRSRV_OK)
- {
- goto fail_create_output_fence;
- }
-
- /* Append the sync prim update for the timeline (if required) */
- if (psFenceTimelineUpdateSync)
- {
- IMG_UINT32 *pui32TimelineUpdateWp = NULL;
-
- /* Allocate memory to hold the list of update values (including our timeline update) */
- pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
- if (!pui32IntAllocatedUpdateValues)
- {
- /* Failed to allocate memory */
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto fail_alloc_update_values_mem;
- }
- OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
- /* Copy the update values into the new memory, then append our timeline update value */
- if (paui32IntUpdateValue)
- {
- OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
- }
- /* Now set the additional update value */
- pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
- *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
- ui32IntClientUpdateCount++;
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- /* Now append the timeline sync prim addr to the transfer context update list */
- SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
- psFenceTimelineUpdateSync);
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
- paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
- }
- }
-
- if (ui32FenceSyncCheckpointCount)
- {
- /* Append the checks (from input fence) */
- if (ui32FenceSyncCheckpointCount > 0)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
- ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
- if (!pauiIntFenceUFOAddress)
- {
- pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
- }
- ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
- }
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- }
- if (psUpdateSyncCheckpoint)
- {
- /* Append the update (from output fence) */
- CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
- SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
- 1,
- &psUpdateSyncCheckpoint);
- if (!pauiIntUpdateUFOAddress)
- {
- pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
- }
- ui32IntClientUpdateCount++;
-#if defined(TDM_CHECKPOINT_DEBUG)
- {
- IMG_UINT32 iii;
- IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
-
- for (iii=0; iii<ui32IntClientUpdateCount; iii++)
- {
- CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
- pui32Tmp++;
- }
- }
-#endif
- }
-
-#if (ENABLE_TDM_UFO_DUMP == 1)
- PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
- {
- IMG_UINT32 ii;
- PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
- PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
- IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
-
- /* Dump Fence syncs and Update syncs */
- PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
- for (ii=0; ii<ui32IntClientFenceCount; ii++)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
- psTmpIntFenceUFOAddress++;
- }
- PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
- for (ii=0; ii<ui32IntClientUpdateCount; ii++)
- {
- if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
- pui32TmpIntUpdateValue++;
- }
- psTmpIntUpdateUFOAddress++;
- }
- }
-#endif
-
- RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
- &pPreAddr,
- &pPostAddr,
- &pRMWUFOAddr);
- /*
- * Extract the FBSC entries from MMU Context for the deferred FBSC invalidate command,
- * in other words, take the value and set it to zero afterwards.
- * FBSC Entry Mask must be extracted from MMU ctx and updated just before the kick starts
- * as it must be ready at the time of context activation.
- */
- {
- eError = RGXExtractFBSCEntryMaskFromMMUContext(psTransferContext->psDeviceNode,
- FWCommonContextGetServerMMUCtx(psTransferContext->sTDMData.psServerCommonContext),
- &ui64FBSCEntryMask);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "Failed to extract FBSC Entry Mask (%d)", eError));
- goto fail_invalfbsc;
- }
- }
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1;
- sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2;
-
- /* Prepare workload estimation */
- WorkEstPrepare(psDeviceNode->pvDevice,
- &psTransferContext->sWorkEstData,
- &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM,
- eType,
- &sWorkloadCharacteristics,
- ui64DeadlineInus,
- &sWorkloadKickDataTransfer);
-#endif
-
- /*
- Create the command helper data for this command
- */
- RGXCmdHelperInitCmdCCB(psDevInfo,
- psClientCCB,
- ui64FBSCEntryMask,
- ui32IntClientFenceCount,
- pauiIntFenceUFOAddress,
- NULL,
- ui32IntClientUpdateCount,
- pauiIntUpdateUFOAddress,
- paui32IntUpdateValue,
- ui32FWCommandSize,
- pui8FWCommand,
- &pPreAddr,
- &pPostAddr,
- &pRMWUFOAddr,
- eType,
- ui32ExtJobRef,
- ui32IntJobRef,
- ui32PDumpFlags,
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- &sWorkloadKickDataTransfer,
-#else /* SUPPORT_WORKLOAD_ESTIMATION */
- NULL,
-#endif /* SUPPORT_WORKLOAD_ESTIMATION */
- pszCommandName,
- bCCBStateOpen,
- psCmdHelper);
- }
-
- /*
- Acquire space for all the commands in one go
- */
-
- eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
- if (eError != PVRSRV_OK)
- {
- goto fail_3dcmdacquire;
- }
-
-
- /*
- We should acquire the kernel CCB(s) space here as the schedule could fail
- and we would have to roll back all the syncs
- */
-
- /*
- Only do the command helper release (which takes the server sync
- operations if the acquire succeeded
- */
- ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
- RGXCmdHelperReleaseCmdCCB(1,
- psCmdHelper,
- "TQ_TDM",
- FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
-
-
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* The following is used to determine the offset of the command header containing
- the workload estimation data so that can be accessed when the KCCB is read */
- ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper);
-
- ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
-
- /* This checks if the command would wrap around at the end of the CCB and
- * therefore would start at an offset of 0 rather than the current command
- * offset */
- if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck)
- {
- ui32TDMWorkloadDataRO = ui32CmdOffset;
- }
- else
- {
- ui32TDMWorkloadDataRO = 0;
- }
-#endif
-
- /*
- Even if we failed to acquire the client CCB space we might still need
- to kick the HW to process a padding packet to release space for us next
- time round
- */
- {
- RGXFWIF_KCCB_CMD sTDMKCCBCmd;
- IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress(
- psTransferContext->sTDMData.psServerCommonContext).ui32Addr;
-
- /* Construct the kernel 3D CCB command. */
- sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
- sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
- sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
- sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
- sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
-
- /* Add the Workload data into the KCCB kick */
-#if defined(SUPPORT_WORKLOAD_ESTIMATION)
- /* Store the offset to the CCCB command header so that it can be referenced
- * when the KCCB command reaches the FW */
- sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset;
-#else
- sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
-#endif
-
- /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
- /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
- /* ui323DCmdOffset); */
- RGXSRV_HWPERF_ENQ(psTransferContext,
- OSGetCurrentClientProcessIDKM(),
- FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
- ui32ExtJobRef,
- ui32IntJobRef,
- RGX_HWPERF_KICK_TYPE_TQTDM,
- iCheckFence,
- iUpdateFence,
- iUpdateTimeline,
- uiCheckFenceUID,
- uiUpdateFenceUID,
- NO_DEADLINE,
- NO_CYCEST);
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
- RGXFWIF_DM_TDM,
- & sTDMKCCBCmd,
- ui32PDumpFlags);
- if (eError2 != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- if (eError2 != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2));
- if (eError == PVRSRV_OK)
- {
- eError = eError2;
- }
- goto fail_2dcmdacquire;
- }
-
- PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef,
- ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
- }
-
- /*
- * Now check eError (which may have returned an error from our earlier calls
- * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
- * so we check it now...
- */
- if (eError != PVRSRV_OK )
- {
- goto fail_2dcmdacquire;
- }
-
-#if defined(NO_HARDWARE)
- /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
- if (psUpdateSyncCheckpoint)
- {
- SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
- }
- if (psFenceTimelineUpdateSync)
- {
- SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
- }
- SyncCheckpointNoHWUpdateTimelines(NULL);
-#endif /* defined(NO_HARDWARE) */
-
-#if defined(SUPPORT_BUFFER_SYNC)
- if (psBufferSyncData)
- {
- pvr_buffer_sync_kick_succeeded(psBufferSyncData);
- }
- if (apsBufferFenceSyncCheckpoints)
- {
- kfree(apsBufferFenceSyncCheckpoints);
- }
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
-
- * piUpdateFence = iUpdateFence;
- if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
- {
- SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
- psUpdateSyncCheckpoint, szUpdateFenceName);
- }
-
- OSFreeMem(psCmdHelper);
-
- /* Drop the references taken on the sync checkpoints in the
- * resolved input fence */
- SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
- /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
- if (apsFenceSyncCheckpoints)
- {
- SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
- }
- /* Free memory allocated to hold the internal list of update values */
- if (pui32IntAllocatedUpdateValues)
- {
- OSFreeMem(pui32IntAllocatedUpdateValues);
- pui32IntAllocatedUpdateValues = NULL;
- }
-
- OSLockRelease(psTransferContext->hLock);
- return PVRSRV_OK;
-
-/*
- No resources are created in this function so there is nothing to free
- unless we had to merge syncs.
- If we fail after the client CCB acquire there is still nothing to do
- as only the client CCB release will modify the client CCB
-*/
-fail_2dcmdacquire:
-fail_3dcmdacquire:
-
-fail_invalfbsc:
- SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
- SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
-fail_alloc_update_values_mem:
-
-/* fail_pdumpcheck: */
-/* fail_cmdtype: */
-
- if (iUpdateFence != PVRSRV_NO_FENCE)
- {
- SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
- }
-fail_create_output_fence:
- /* Drop the references taken on the sync checkpoints in the
- * resolved input fence */
- SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
- apsFenceSyncCheckpoints);
-
-fail_resolve_input_fence:
-
-#if defined(SUPPORT_BUFFER_SYNC)
- if (psBufferSyncData)
- {
- pvr_buffer_sync_kick_failed(psBufferSyncData);
- }
- if (apsBufferFenceSyncCheckpoints)
- {
- kfree(apsBufferFenceSyncCheckpoints);
- }
-#endif /* defined(SUPPORT_BUFFER_SYNC) */
-
-fail_populate_sync_addr_list:
- PVR_ASSERT(eError != PVRSRV_OK);
- OSFreeMem(psCmdHelper);
-fail_allochelper:
-
- if (apsFenceSyncCheckpoints)
- {
- SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
- }
- OSLockRelease(psTransferContext->hLock);
- return eError;
-}
-
-
-PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
- RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
- IMG_UINT32 ui32PDumpFlags)
-{
- RGXFWIF_KCCB_CMD sKCCBCmd;
- PVRSRV_ERROR eError;
-
- OSLockAcquire(psTransferContext->hLock);
-
- /* Schedule the firmware command */
- sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
- sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
-
- LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
- {
- eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
- RGXFWIF_DM_TDM,
- &sKCCBCmd,
- ui32PDumpFlags);
- if (eError != PVRSRV_ERROR_RETRY)
- {
- break;
- }
- OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
- } END_LOOP_UNTIL_TIMEOUT();
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to schedule the FW command %d (%s)",
- __func__, eError, PVRSRVGETERRORSTRING(eError)));
- }
-
- OSLockRelease(psTransferContext->hLock);
- return eError;
-}
-
-PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE * psDevNode,
- RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
- IMG_UINT32 ui32Priority)
-{
- PVRSRV_ERROR eError;
-
- PVR_UNREFERENCED_PARAMETER(psDevNode);
-
- OSLockAcquire(psTransferContext->hLock);
-
- if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
- {
- eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
- psConnection,
- psTransferContext->psDeviceNode->pvDevice,
- ui32Priority,
- RGXFWIF_DM_TDM);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError)));
-
- OSLockRelease(psTransferContext->hLock);
- return eError;
- }
- }
-
- OSLockRelease(psTransferContext->hLock);
- return PVRSRV_OK;
-}
-
-PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
- RGX_CONTEXT_PROPERTY eContextProperty,
- IMG_UINT64 ui64Input,
- IMG_UINT64 *pui64Output)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- switch (eContextProperty)
- {
- case RGX_CONTEXT_PROPERTY_FLAGS:
- {
- IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
-
- OSLockAcquire(psTransferContext->hLock);
- eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext,
- ui32ContextFlags);
- OSLockRelease(psTransferContext->hLock);
- break;
- }
-
- default:
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
- eError = PVRSRV_ERROR_NOT_SUPPORTED;
- }
- }
-
- return eError;
-}
-
-void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
- DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
- void *pvDumpDebugFile,
- IMG_UINT32 ui32VerbLevel)
-{
- DLLIST_NODE *psNode, *psNext;
-
- OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
-
- dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
- {
- RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
- IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
-
- DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
- pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
- }
-
- OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
-}
-
-
-IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
-{
- DLLIST_NODE *psNode, *psNext;
- IMG_UINT32 ui32ContextBitMask = 0;
-
- OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
-
- dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
- {
- RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
- IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
-
- if (CheckStalledClientCommonContext(
- psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
- == PVRSRV_ERROR_CCCB_STALLED) {
- ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
- }
- }
-
- OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
- return ui32ContextBitMask;
-}
-
-/**************************************************************************//**
- End of file (rgxtdmtransfer.c)
-******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxworkest_ray.c
+@Title RGX Workload Estimation Functionality for ray datamaster
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel mode workload estimation functionality for ray datamaster.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxdevice.h"
+#include "rgxworkest.h"
+#include "rgxworkest_ray.h"
+#include "rgxfwutils.h"
+#include "rgxpdvfs.h"
+#include "rgx_options.h"
+#include "device.h"
+#include "hash.h"
+#include "pvr_debug.h"
+
+
+/* Hash-table key comparison for ray (RDM) workloads: two workload keys match
+ * when both their dispatch size and acceleration structure size are equal. */
+static IMG_BOOL WorkEstHashCompareRay(size_t uKeySize, void *pKey1, void *pKey2)
+{
+ RGX_WORKLOAD *psWorkload1;
+ RGX_WORKLOAD *psWorkload2;
+ PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+ if (pKey1 && pKey2)
+ {
+ psWorkload1 = *((RGX_WORKLOAD **)pKey1);
+ psWorkload2 = *((RGX_WORKLOAD **)pKey2);
+
+ PVR_ASSERT(psWorkload1);
+ PVR_ASSERT(psWorkload2);
+
+ if (psWorkload1->sRay.ui32DispatchSize == psWorkload2->sRay.ui32DispatchSize &&
+ psWorkload1->sRay.ui32AccStructSize == psWorkload2->sRay.ui32AccStructSize)
+ {
+ /* On a match, alias the stored key pointer to the incoming one so the
+ * duplicate key allocation can later be freed by the hash-table user. */
+ *(uintptr_t*)pKey2 = *(uintptr_t*)pKey1;
+ return IMG_TRUE;
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+/* Hash function for ray (RDM) workload keys; combines the hashes of the two
+ * fields that WorkEstHashCompareRay compares. */
+static IMG_UINT32 WorkEstHashFuncRay(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ RGX_WORKLOAD *psWorkload = *((RGX_WORKLOAD**)pKey);
+ IMG_UINT32 ui32HashKey = 0;
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+ PVR_UNREFERENCED_PARAMETER(uKeySize);
+
+ /* Hash key predicated on the ray dispatch size and acceleration
+ * structure size (the attributes characterising an RDM workload). */
+ ui32HashKey += _WorkEstDoHash(psWorkload->sRay.ui32DispatchSize);
+ ui32HashKey += _WorkEstDoHash(psWorkload->sRay.ui32AccStructSize);
+
+ return ui32HashKey;
+}
+
+/* Initialise workload estimation for the ray data master: registers the
+ * RDM workload-matching hash table with the workload-estimation core,
+ * wiring in the ray-specific hash and key-compare callbacks above. */
+void WorkEstInitRay(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+ _WorkEstInit(psDevInfo,
+ &psWorkEstData->uWorkloadMatchingData.sRay.sDataRDM,
+ (HASH_FUNC *)WorkEstHashFuncRay,
+ (HASH_KEY_COMP *)WorkEstHashCompareRay);
+}
+
+/* Tear down the RDM workload-matching data created by WorkEstInitRay. */
+void WorkEstDeInitRay(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData)
+{
+ _WorkEstDeInit(psDevInfo, &psWorkEstData->uWorkloadMatchingData.sRay.sDataRDM);
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File rgxworkest_ray.h
+@Title RGX Workload Estimation Functionality for ray datamaster
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the kernel mode workload estimation functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXWORKEST_RAY_H
+#define RGXWORKEST_RAY_H
+
+#include "img_types.h"
+
+/* NOTE(review): PVRSRV_RGXDEV_INFO and WORKEST_HOST_DATA are not declared
+ * by img_types.h — this header appears to rely on the includer providing
+ * them (e.g. via rgxdevice.h); confirm against users of this header. */
+
+/* Register the ray (RDM) workload-matching hash table callbacks. */
+void WorkEstInitRay(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+/* Release the ray (RDM) workload-matching data set up by WorkEstInitRay. */
+void WorkEstDeInitRay(PVRSRV_RGXDEV_INFO *psDevInfo, WORKEST_HOST_DATA *psWorkEstData);
+
+#endif /* RGXWORKEST_RAY_H */
ifeq ($(SUPPORT_RGX),1)
$(PVRSRV_MODNAME)-y += \
services/server/devices/rgx_bridge_init.o \
- services/server/env/linux/pvr_gputrace.o \
services/server/devices/rgxfwdbg.o \
services/server/devices/rgxtimerquery.o \
services/server/devices/rgxccb.o \
- services/server/devices/$(PVR_ARCH_DEFS)/rgxdebug.o \
+ services/server/devices/$(PVR_ARCH)/rgxdebug.o \
services/server/devices/rgxfwtrace_strings.o \
services/server/devices/$(PVR_ARCH)/rgxfwutils.o \
services/server/devices/$(PVR_ARCH)/rgxinit.o \
services/server/devices/rgxbvnc.o \
- services/server/devices/rgxkicksync.o \
services/server/devices/$(PVR_ARCH)/rgxlayer_impl.o \
services/server/devices/rgxmem.o \
services/server/devices/$(PVR_ARCH)/rgxmmuinit.o \
services/server/devices/rgxregconfig.o \
services/server/devices/$(PVR_ARCH)/rgxta3d.o \
services/server/devices/rgxsyncutils.o \
- services/server/devices/$(PVR_ARCH)/rgxtdmtransfer.o \
+ services/server/devices/rgxtdmtransfer.o \
services/server/devices/rgxutils.o \
services/server/devices/rgxhwperf_common.o \
services/server/devices/$(PVR_ARCH)/rgxhwperf.o \
services/server/devices/$(PVR_ARCH)/rgxpower.o \
services/server/devices/$(PVR_ARCH)/rgxstartstop.o \
services/server/devices/rgxtimecorr.o \
- services/server/devices/$(PVR_ARCH)/rgxcompute.o \
+ services/server/devices/rgxcompute.o \
services/server/devices/$(PVR_ARCH)/rgxmulticore.o \
services/server/devices/rgxshader.o
+$(PVRSRV_MODNAME)-$(CONFIG_EVENT_TRACING) += services/server/env/linux/pvr_gputrace.o
+
+ifeq ($(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/env/linux/pvr_gpuwork.o
+endif
+
+ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1)
+$(PVRSRV_MODNAME)-y += \
+ services/server/devices/rgxkicksync.o
+endif
+
ifeq ($(SUPPORT_USC_BREAKPOINT),1)
$(PVRSRV_MODNAME)-y += \
services/server/devices/rgxbreakpoint.o
ifeq ($(SUPPORT_WORKLOAD_ESTIMATION),1)
$(PVRSRV_MODNAME)-y += \
services/server/devices/rgxworkest.o
+ ifeq ($(PVR_ARCH),volcanic)
+ $(PVRSRV_MODNAME)-y += \
+ services/server/devices/$(PVR_ARCH)/rgxworkest_ray.o
+ endif
endif
ifeq ($(SUPPORT_VALIDATION),1)
$(PVRSRV_MODNAME)-y += \
services/server/devices/rgxsoctimer.o
endif
-endif
+endif # SUPPORT_RGX
ifeq ($(SUPPORT_DISPLAY_CLASS),1)
$(PVRSRV_MODNAME)-y += \
$(PVRSRV_MODNAME)-y += services/server/common/devicemem_history_server.o
+ifeq ($(PVRSRV_PHYSMEM_CPUMAP_HISTORY),1)
+$(PVRSRV_MODNAME)-y += services/server/common/physmem_cpumap_history.o
+endif
+
ifeq ($(PVR_HANDLE_BACKEND),generic)
$(PVRSRV_MODNAME)-y += services/server/common/handle_generic.o
else
services/server/env/linux/pvr_ion_stats.o
endif
+ifeq ($(SUPPORT_GPUVIRT_VALIDATION),1)
+ ifeq ($(PVRSRV_TEST_FW_PREMAP_MMU),1)
+ $(PVRSRV_MODNAME)-y += \
+ services/system/common/tee/xt_mmu_fw_premap.o
+ endif
+endif
+
$(PVRSRV_MODNAME)-$(CONFIG_X86) += services/server/env/linux/osfunc_x86.o
$(PVRSRV_MODNAME)-$(CONFIG_ARM) += services/server/env/linux/osfunc_arm.o
$(PVRSRV_MODNAME)-$(CONFIG_ARM64) += services/server/env/linux/osfunc_arm64.o
services/server/devices/rgxfwimageutils.o
ifeq ($(PVR_ARCH),rogue)
$(PVRSRV_MODNAME)-y += \
- services/shared/devices/$(PVR_ARCH_DEFS)/rgx_hwperf_table.o
+ services/shared/devices/$(PVR_ARCH)/rgx_hwperf_table.o
endif
endif
-I$(TOP)/services/shared/common \
-I$(TOP)/services/shared/devices \
-I$(TOP)/services/system/include \
+ -I$(TOP)/services/system/common/tee \
-I$(TOP)/services/system/$(PVR_ARCH)/include \
-I$(TOP)/services/server/common/$(PVR_ARCH) -I$(TOP)/services/server/common
ifeq ($(SUPPORT_RGX),1)
ccflags-y += \
+ -I$(bridge_base)/rgxtq2_bridge \
-I$(bridge_base)/rgxta3d_bridge \
-I$(bridge_base)/rgxhwperf_bridge \
- -I$(bridge_base)/rgxkicksync_bridge \
-I$(bridge_base)/rgxcmp_bridge \
-I$(bridge_base)/rgxregconfig_bridge \
-I$(bridge_base)/rgxtimerquery_bridge \
ccflags-y += \
-I$(bridge_base)/rgxtq_bridge
endif
-# Oceanic does not support TDM
-ifneq ($(PVR_ARCH_DEFS),oceanic)
-ccflags-y += \
- -I$(bridge_base)/rgxtq2_bridge
-endif
ifeq ($(SUPPORT_USC_BREAKPOINT),1)
ccflags-y += \
-I$(bridge_base)/rgxbreakpoint_bridge
endif
+ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1)
+ccflags-y += \
+ -I$(bridge_base)/rgxkicksync_bridge
+endif
endif
$(PVRSRV_MODNAME)-y += \
ifeq ($(SUPPORT_RGX),1)
$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o \
generated/$(PVR_ARCH)/rgxta3d_bridge/server_rgxta3d_bridge.o \
generated/$(PVR_ARCH)/rgxhwperf_bridge/server_rgxhwperf_bridge.o \
- generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o \
generated/$(PVR_ARCH)/rgxcmp_bridge/server_rgxcmp_bridge.o \
generated/$(PVR_ARCH)/rgxregconfig_bridge/server_rgxregconfig_bridge.o \
generated/$(PVR_ARCH)/rgxtimerquery_bridge/server_rgxtimerquery_bridge.o \
$(PVRSRV_MODNAME)-y += \
generated/$(PVR_ARCH)/rgxtq_bridge/server_rgxtq_bridge.o
endif
-# Oceanic does not support TDM
-ifneq ($(PVR_ARCH_DEFS),oceanic)
-$(PVRSRV_MODNAME)-y += \
- generated/$(PVR_ARCH)/rgxtq2_bridge/server_rgxtq2_bridge.o
-endif
ifeq ($(SUPPORT_USC_BREAKPOINT),1)
$(PVRSRV_MODNAME)-y += \
generated/$(PVR_ARCH)/rgxbreakpoint_bridge/server_rgxbreakpoint_bridge.o
endif
+ifeq ($(SUPPORT_RGXKICKSYNC_BRIDGE),1)
+$(PVRSRV_MODNAME)-y += \
+ generated/$(PVR_ARCH)/rgxkicksync_bridge/server_rgxkicksync_bridge.o
+endif
endif
ifeq ($(SUPPORT_WRAP_EXTMEM),1)
* enabled, since all allocations are tracked in DebugFS mem_area files.
*/
#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
-#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+/* kmalloc guarantees a minimal alignment which is ARCH_KMALLOC_MINALIGN. This
+ * alignment is architecture specific and can be quite big, e.g. on Aarch64
+ * it can be 64 bytes. This is too much for keeping a single PID field and could
+ * lead to a lot of wasted memory. This is a reason why we're defaulting to 8
+ * bytes alignment which should be enough for any architecture.
+ */
+#define ALLOCMEM_PID_SIZE_PADDING PVR_ALIGN(sizeof(IMG_UINT32), 8)
#else
-#define ALLOCMEM_MEMSTATS_PADDING 0UL
+#define ALLOCMEM_PID_SIZE_PADDING 0UL
#endif
/* How many times kmalloc can fail before the allocation threshold is reduced */
kfree(pvAddr);
}
-static inline void _pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
+static inline void *_pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
{
#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
PVR_UNREFERENCED_PARAMETER(pvAddr);
pvAddr,
sCpuPAddr,
ksize(pvAddr),
- NULL,
OSGetCurrentClientProcessIDKM()
DEBUG_MEMSTATS_ARGS);
#else
- {
- /* Store the PID in the final additional 4 bytes allocated */
- IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING);
- *puiTemp = OSGetCurrentClientProcessIDKM();
- }
- PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), OSGetCurrentClientProcessIDKM());
+ /* because clang has some features that allow detection out-of-bounds
+ * access we need to put the metadata in the beginning of the allocation */
+ *(IMG_UINT32 *) pvAddr = OSGetCurrentClientProcessIDKM();
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr),
+ *(IMG_UINT32 *) pvAddr);
+
+ /* because metadata is kept in the beginning of the allocation we need
+ * to return address offset by the ALLOCMEM_PID_SIZE_PADDING */
+ pvAddr = (IMG_UINT8 *) pvAddr + ALLOCMEM_PID_SIZE_PADDING;
#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
}
else
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
pvAddr,
sCpuPAddr,
- ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
- NULL,
+ PVR_ALIGN(ui32Size, PAGE_SIZE),
OSGetCurrentClientProcessIDKM()
DEBUG_MEMSTATS_ARGS);
#else
PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
- ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)),
+ PVR_ALIGN(ui32Size, PAGE_SIZE),
(IMG_UINT64)(uintptr_t) pvAddr,
OSGetCurrentClientProcessIDKM());
#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
}
#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+ return pvAddr;
}
-static inline void _pvr_alloc_stats_remove(void *pvAddr)
+static inline void *_pvr_alloc_stats_remove(void *pvAddr)
{
#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
PVR_UNREFERENCED_PARAMETER(pvAddr);
if (!is_vmalloc_addr(pvAddr))
{
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
- {
- IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING);
- PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *puiTemp);
- }
+ /* because metadata is kept in the beginning of the allocation we need
+ * shift address offset by the ALLOCMEM_PID_SIZE_PADDING to the original
+ * value */
+ pvAddr = (IMG_UINT8 *) pvAddr - ALLOCMEM_PID_SIZE_PADDING;
+
+ /* first 4 bytes of the allocation are the process' PID */
+ PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *(IMG_UINT32 *) pvAddr);
#else
PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
(IMG_UINT64)(uintptr_t) pvAddr,
#endif
}
#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+ return pvAddr;
}
void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS)
{
void *pvRet = NULL;
- if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold)
+ if ((ui32Size + ALLOCMEM_PID_SIZE_PADDING) <= g_ui32kmallocThreshold)
{
- pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ pvRet = kmalloc(ui32Size + ALLOCMEM_PID_SIZE_PADDING, GFP_KERNEL);
if (pvRet == NULL)
{
OSTryDecreaseKmallocThreshold();
if (pvRet != NULL)
{
- _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
+ pvRet = _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
}
return pvRet;
{
void *pvRet = NULL;
- if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold)
+ if ((ui32Size + ALLOCMEM_PID_SIZE_PADDING) <= g_ui32kmallocThreshold)
{
- pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ pvRet = kzalloc(ui32Size + ALLOCMEM_PID_SIZE_PADDING, GFP_KERNEL);
if (pvRet == NULL)
{
OSTryDecreaseKmallocThreshold();
if (pvRet != NULL)
{
- _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
+ pvRet = _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS);
}
return pvRet;
#endif
if (pvMem != NULL)
{
- _pvr_alloc_stats_remove(pvMem);
+ pvMem = _pvr_alloc_stats_remove(pvMem);
if (!is_vmalloc_addr(pvMem))
{
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/string.h>
-#include <img_types.h>
/* Common and SO layer */
#include "img_defs.h"
#include "km_apphint.h"
#if defined(PDUMP)
-#include <stdarg.h>
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
#include "pdump_km.h"
#endif
};
struct apphint_init_data {
- IMG_UINT32 id; /* index into AppHint Table */
+ IMG_UINT32 id; /* index into AppHint Table */
APPHINT_CLASS class;
const IMG_CHAR *name;
union apphint_value default_value;
+ APPHINT_RT_CLASS guest; /* ALWAYS => present on GUEST,
+ NEVER => not present on GUEST */
};
struct apphint_init_data_mapping {
#define UINT32List UINT32
static const struct apphint_init_data init_data_buildvar[] = {
-#define X(a, b, c, d, e) \
- {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+#define X(a, b, c, d, e, f) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f },
APPHINT_LIST_BUILDVAR_COMMON
APPHINT_LIST_BUILDVAR
#undef X
};
static const struct apphint_init_data init_data_modparam[] = {
-#define X(a, b, c, d, e) \
- {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+#define X(a, b, c, d, e, f) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f },
APPHINT_LIST_MODPARAM_COMMON
APPHINT_LIST_MODPARAM
#undef X
};
static const struct apphint_init_data init_data_debuginfo[] = {
-#define X(a, b, c, d, e) \
- {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+#define X(a, b, c, d, e, f) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f },
APPHINT_LIST_DEBUGINFO_COMMON
APPHINT_LIST_DEBUGINFO
#undef X
};
static const struct apphint_init_data init_data_debuginfo_device[] = {
-#define X(a, b, c, d, e) \
- {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+#define X(a, b, c, d, e, f) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d}, APPHINT_RT_CLASS_ ## f },
APPHINT_LIST_DEBUGINFO_DEVICE_COMMON
APPHINT_LIST_DEBUGINFO_DEVICE
#undef X
__maybe_unused static const char NO_PARAM_TABLE[] = {};
static const struct apphint_param param_lookup[] = {
-#define X(a, b, c, d, e) \
+#define X(a, b, c, d, e, f) \
{APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) },
APPHINT_LIST_ALL
#undef X
.val = {
#define UINT32Bitfield UINT32
#define UINT32List UINT32
-#define X(a, b, c, d, e) \
- { {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+#define X(a, b, c, d, e, f) \
+ { {NULL}, {NULL}, NULL, NULL, {.b=d}, NULL },
APPHINT_LIST_ALL
#undef X
#undef UINT32Bitfield
__func__, param_lookup[id].data_type, id));
}
- /* Do not log errors if running in GUEST mode */
- if ((PVRSRV_OK != result) && !PVRSRV_VZ_MODE_IS(GUEST)) {
+ if (PVRSRV_OK != result) {
PVR_DPF((PVR_DBG_ERROR,
"%s: failed (%s)",
__func__, PVRSRVGetErrorString(result)));
return result;
}
-/**
+/*
* apphint_write - write the current AppHint data to a buffer
*
* Returns length written or -errno
*******************************************************************************
Module parameters initialization - different from debuginfo
******************************************************************************/
-/**
+/*
* apphint_kparam_set - Handle an update of a module parameter
*
* Returns 0, or -errno. arg is in kp->arg.
return (result > 0) ? 0 : result;
}
-/**
+/*
* apphint_kparam_get - handle a read of a module parameter
*
* Returns length written or -errno. Buffer is 4k (ie. be short!)
#define apphint_modparam_enable(name, number, perm) \
module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
-#define X(a, b, c, d, e) \
+#define X(a, b, c, d, e, f) \
apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444)
APPHINT_LIST_MODPARAM_COMMON
APPHINT_LIST_MODPARAM
Debug Info supporting functions
******************************************************************************/
-/**
+/*
* apphint_set - Handle a DI value update
*/
static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count,
return result;
}
-/**
+/*
* apphint_debuginfo_init - Create the specified debuginfo entries
*/
static int apphint_debuginfo_init(const char *sub_dir,
.pfnNext = apphint_di_next, .pfnShow = apphint_di_show,
.pfnWrite = apphint_set, .ui32WriteLenMax = APPHINT_BUFFER_SIZE
};
+ /* Determine if we're booted as a GUEST VZ OS */
+ IMG_BOOL bIsGUEST = PVRSRV_VZ_MODE_IS(GUEST);
if (*rootdir) {
PVR_DPF((PVR_DBG_WARNING,
if (!class_state[init_data[i].class].enabled)
continue;
+ /* Check to see if this AppHint should appear in a GUEST OS.
+ * This will have a value in the init_data[i].guest field of ALWAYS
+ * and if we don't have this set (and we're in GUEST mode) we must
+ * not present this AppHint to the OS.
+ */
+ if (bIsGUEST && (init_data[i].guest != APPHINT_RT_CLASS_ALWAYS))
+ continue;
+
result = DICreateEntry(init_data[i].name,
*rootdir,
&iterator,
return result;
}
-/**
+/*
* apphint_debuginfo_deinit- destroy the debuginfo entries
*/
static void apphint_debuginfo_deinit(unsigned int num_entries,
}
}
-/**
+/*
* Callback for debug dump
*/
static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
void *pvDumpDebugFile)
{
int i, result;
- char km_buffer[APPHINT_BUFFER_SIZE];
+ char description_buffer[50];
PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) {
|| (device && device != apphint.devices[i]))
continue;
- result = snprintf(km_buffer,
- APPHINT_BUFFER_SIZE,
+ result = snprintf(description_buffer,
+ sizeof(description_buffer),
"Debug Info Params Device ID: %d",
i);
if (0 > result)
continue;
- apphint_dump_values(km_buffer, i,
+ apphint_dump_values(description_buffer, i,
init_data_debuginfo_device,
ARRAY_SIZE(init_data_debuginfo_device),
pfnDumpDebugPrintf,
int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
{
int result, i;
- char device_num[APPHINT_BUFFER_SIZE];
unsigned int device_value_offset;
if (!apphint.initialized) {
goto err_out;
}
- result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%u", apphint.num_devices);
- if (result < 0) {
- PVR_DPF((PVR_DBG_WARNING,
- "snprintf failed (%d)", result));
- result = -EINVAL;
- goto err_out;
- }
-
/* Set the default values for the new device */
device_value_offset = apphint.num_devices * APPHINT_DEBUGINFO_DEVICE_ID_MAX;
for (i = 0; i < APPHINT_DEBUGINFO_DEVICE_ID_MAX; i++) {
}
}
- result = apphint_debuginfo_init(device_num, apphint.num_devices,
+ result = apphint_debuginfo_init("apphint", device->sDevId.ui32InternalID,
ARRAY_SIZE(init_data_debuginfo_device),
init_data_debuginfo_device,
- apphint.debuginfo_rootdir,
- &apphint.debuginfo_device_rootdir[apphint.num_devices],
- apphint.debuginfo_device_entry[apphint.num_devices]);
+ device->sDebugInfo.psGroup,
+ &apphint.debuginfo_device_rootdir[device->sDevId.ui32InternalID],
+ apphint.debuginfo_device_entry[device->sDevId.ui32InternalID]);
if (0 != result)
goto err_out;
- apphint.devices[apphint.num_devices] = device;
+ apphint.devices[device->sDevId.ui32InternalID] = device;
apphint.num_devices++;
(void)SOPvrDbgRequestNotifyRegister(
EXPORT_SYMBOL(DCImportBufferAcquire);
EXPORT_SYMBOL(DCImportBufferRelease);
-/* Physmem interface (required by LMA DC drivers) */
-#include "physheap.h"
-EXPORT_SYMBOL(PhysHeapAcquireByUsage);
-EXPORT_SYMBOL(PhysHeapRelease);
-EXPORT_SYMBOL(PhysHeapGetType);
-EXPORT_SYMBOL(PhysHeapGetCpuPAddr);
-EXPORT_SYMBOL(PhysHeapGetSize);
-EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
-
EXPORT_SYMBOL(PVRSRVGetDriverStatus);
EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
#include "pvr_notifier.h"
EXPORT_SYMBOL(PVRSRVCheckStatus);
+#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+
+#if defined(SUPPORT_EXTERNAL_PHYSHEAP_INTERFACE)
+/*
+ * Physmem interface.
+ * Required by LMA DC drivers, and some non-DC LMA display drivers.
+ */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquireByID);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
#include "pvr_debug.h"
EXPORT_SYMBOL(PVRSRVGetErrorString);
EXPORT_SYMBOL(PVRSRVGetDeviceInstance);
-#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+#endif
#if defined(SUPPORT_RGX)
#include "rgxapi_km.h"
{
if (pFile)
{
- struct drm_file *psDRMFile = pFile->private_data;
- PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+ struct drm_file *psDRMFile;
+ PVRSRV_CONNECTION_PRIV *psConnectionPriv;
+
+ psDRMFile = pFile->private_data;
+ PVR_LOG_RETURN_IF_FALSE(psDRMFile != NULL, "psDRMFile is NULL", NULL);
+
+ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
+ PVR_LOG_RETURN_IF_FALSE(psConnectionPriv != NULL, "psConnectionPriv is NULL", NULL);
return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData;
}
* need it */
PVRGpuTraceInitAppHintCallbacks(NULL);
#endif
+
return 0;
}
{
PVR_DPF((PVR_DBG_WARNING,
"%s: failed to initialise PVR GPU Tracing on device%d (%d)",
- __func__, psDeviceNode->sDevId.i32OsDeviceID, error));
+ __func__, psDeviceNode->sDevId.i32KernelDeviceID, error));
}
}
#endif
/**************************************************************************/ /*!
@Function PVRSRVDeviceShutdown
@Description Common device shutdown.
-@Input psDeviceNode The device node representing the device that should
- be shutdown
+@Input psDev The device node representing the device that should
+ be shutdown
@Return void
*/ /***************************************************************************/
-void PVRSRVDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+void PVRSRVDeviceShutdown(struct drm_device *psDev)
{
- PVRSRV_ERROR eError;
-
- /*
- * Disable the bridge to stop processes trying to use the driver
- * after it has been shut down.
- */
- eError = LinuxBridgeBlockClientsAccess(IMG_TRUE);
+ struct pvr_drm_private *psDevPriv = psDev->dev_private;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node;
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Failed to suspend driver (%d)",
- __func__, eError));
- return;
- }
+ /* Since this is a shutdown request ignore the returned error and try to
+ * to power off the device. This is done because there is no way of
+ * signalling the OS that this call failed. */
+ (void) LinuxBridgeBlockClientsAccess(psDevPriv, IMG_TRUE);
+ /* Passing PVRSRV_POWER_FLAGS_NONE as there are no special actions required
+ * from the shutdown call beside the regular device power off. */
(void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
PVRSRV_SYS_POWER_STATE_OFF,
PVRSRV_POWER_FLAGS_NONE);
/**************************************************************************/ /*!
@Function PVRSRVDeviceSuspend
@Description Common device suspend.
-@Input psDeviceNode The device node representing the device that should
- be suspended
-@Return int 0 on success and a Linux error code otherwise
+@Input psDev The device node representing the device that should
+ be suspended
+@Return int 0 on success and a Linux error code otherwise
*/ /***************************************************************************/
-int PVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+int PVRSRVDeviceSuspend(struct drm_device *psDev)
{
- /*
- * LinuxBridgeBlockClientsAccess prevents processes from using the driver
- * while it's suspended (this is needed for Android). Acquire the bridge
- * lock first to ensure the driver isn't currently in use.
- */
- LinuxBridgeBlockClientsAccess(IMG_FALSE);
+ struct pvr_drm_private *psDevPriv = psDev->dev_private;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node;
+ PVRSRV_ERROR eError;
+
+ /* LinuxBridgeBlockClientsAccess prevents processes from using the driver
+ * while it's suspended (this is needed for Android). */
+ eError = LinuxBridgeBlockClientsAccess(psDevPriv, IMG_FALSE);
+ PVR_LOG_RETURN_IF_FALSE_VA(eError == PVRSRV_OK, -EFAULT,
+ "LinuxBridgeBlockClientsAccess() failed with error %u",
+ eError);
#if defined(SUPPORT_AUTOVZ)
/* To allow the driver to power down the GPU under AutoVz, the firmware must
if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
PVRSRV_SYS_POWER_STATE_OFF,
- PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
+ PVRSRV_POWER_FLAGS_SUSPEND_REQ) != PVRSRV_OK)
{
- LinuxBridgeUnblockClientsAccess();
+ /* Ignore return error as we're already returning an error here. */
+ (void) LinuxBridgeUnblockClientsAccess(psDevPriv);
return -EINVAL;
}
/**************************************************************************/ /*!
@Function PVRSRVDeviceResume
@Description Common device resume.
-@Input psDeviceNode The device node representing the device that should
- be resumed
-@Return int 0 on success and a Linux error code otherwise
+@Input psDev The device node representing the device that should
+ be resumed
+@Return int 0 on success and a Linux error code otherwise
*/ /***************************************************************************/
-int PVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+int PVRSRVDeviceResume(struct drm_device *psDev)
{
+ struct pvr_drm_private *psDevPriv = psDev->dev_private;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevPriv->dev_node;
+
if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
PVRSRV_SYS_POWER_STATE_ON,
- PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
+ PVRSRV_POWER_FLAGS_RESUME_REQ) != PVRSRV_OK)
{
return -EINVAL;
}
- LinuxBridgeUnblockClientsAccess();
+ /* Ignore return error. We should proceed even if this fails. */
+ (void) LinuxBridgeUnblockClientsAccess(psDevPriv);
/*
* Reprocess the device queues in case commands were blocked during
psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
}
- if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_CREATED)
{
+ PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON);
eError = PVRSRVCommonDeviceInitialise(psDeviceNode);
if (eError != PVRSRV_OK)
{
psConnectionPriv->pfDeviceRelease = PVRSRVCommonConnectionDisconnect;
#endif
psDRMFile->driver_priv = (void*)psConnectionPriv;
+
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ eError = PVRSRVGpuTraceWorkPeriodEventStatsRegister(
+ &psConnectionPriv->pvGpuWorkPeriodEventStats);
+ if (eError != PVRSRV_OK)
+ {
+ iErr = -ENOMEM;
+ goto fail_connect;
+ }
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
goto out;
fail_connect:
if (psConnectionPriv->pvConnectionData)
{
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ if (psConnectionPriv->pvGpuWorkPeriodEventStats)
+ {
+ PVRSRVGpuTraceWorkPeriodEventStatsUnregister(
+ psConnectionPriv->pvGpuWorkPeriodEventStats);
+ psConnectionPriv->pvGpuWorkPeriodEventStats = NULL;
+ }
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
if (psConnectionPriv->pfDeviceRelease)
{
* the same psDRMFile.
*/
void *pvSyncConnectionData;
+
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ /* hGpuWorkPeriodEventStats is used to hold gpu work period event stats
+ * private data for each apps which have been working with GPU.
+ */
+ void *pvGpuWorkPeriodEventStats;
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
} PVRSRV_CONNECTION_PRIV;
int PVRSRVDriverInit(void);
int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
-void PVRSRVDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
-int PVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
-int PVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVDeviceShutdown(struct drm_device *psDev);
+int PVRSRVDeviceSuspend(struct drm_device *psDev);
+int PVRSRVDeviceResume(struct drm_device *psDev);
int PVRSRVDeviceServicesOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
struct drm_file *psDRMFile);
PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
{
- ENV_CONNECTION_DATA *psEnvConnection;
-
if (hOsPrivateData == NULL)
{
return PVRSRV_OK;
}
- psEnvConnection = hOsPrivateData;
-
#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
- PVR_ASSERT(psEnvConnection->psIonData != NULL);
+ {
+ ENV_CONNECTION_DATA *psEnvConnection = hOsPrivateData;
+
+ PVR_ASSERT(psEnvConnection->psIonData != NULL);
- PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
- ion_client_destroy(psEnvConnection->psIonData->psIonClient);
+ PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
+ ion_client_destroy(psEnvConnection->psIonData->psIonClient);
- IonDevRelease(psEnvConnection->psIonData->psIonDev);
- OSFreeMem(psEnvConnection->psIonData);
+ IonDevRelease(psEnvConnection->psIonData->psIonDev);
+ OSFreeMem(psEnvConnection->psIonData);
+ }
#endif
OSFreeMem(hOsPrivateData);
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
#include <linux/kthread.h>
#include <linux/utsname.h>
#include <linux/scatterlist.h>
#else
#include <linux/sched.h>
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND)
+#include <linux/dma-heap.h>
+#include "physmem_dmabuf.h"
+#else
+#include "physmem.h"
+#endif
+#endif
#include "log2.h"
#include "osfunc.h"
#include "pvrsrv_sync_server.h"
+#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
+#include "ri_server.h"
+#include "pvr_ricommon.h"
+#endif
#if defined(VIRTUAL_PLATFORM)
#define EVENT_OBJECT_TIMEOUT_US (120000000ULL)
psPage,
sCpuPAddr,
uiSize,
- NULL,
uiPid
DEBUG_MEMSTATS_VALUES);
#endif
void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle)
{
struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
- IMG_UINT32 uiSize, uiPageCount=0, ui32Order;
+ IMG_UINT32 ui32Order;
PVR_UNREFERENCED_PARAMETER(psPhysHeap);
ui32Order = psMemHandle->uiOrder;
- uiPageCount = (1 << ui32Order);
- uiSize = (uiPageCount * PAGE_SIZE);
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
*pvPtr,
sCpuPAddr,
actualSize,
- NULL,
OSGetCurrentClientProcessIDKM()
DEBUG_MEMSTATS_VALUES);
}
return uiSize;
}
-IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
-{
- va_list argList;
- IMG_INT32 iCount = 0;
-
- va_start(argList, pszFormat);
- iCount = vsscanf(pStr, pszFormat, argList);
- va_end(argList);
-
- return iCount;
-}
-
IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
{
return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen);
#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC)
static struct workqueue_struct *gpFenceStatusWq;
-static struct workqueue_struct *gpFenceUnorderedWq;
static PVRSRV_ERROR _NativeSyncInit(void)
{
return PVRSRV_ERROR_INIT_FAILURE;
}
- gpFenceUnorderedWq = create_workqueue("pvr_fence_unordered");
- if (!gpFenceUnorderedWq)
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create fence unordered workqueue",
- __func__));
- destroy_workqueue(gpFenceStatusWq);
- gpFenceStatusWq = NULL;
- return PVRSRV_ERROR_INIT_FAILURE;
- }
-
return PVRSRV_OK;
}
static void _NativeSyncDeinit(void)
{
- destroy_workqueue(gpFenceUnorderedWq);
destroy_workqueue(gpFenceStatusWq);
}
return gpFenceStatusWq;
}
-
-struct workqueue_struct *NativeSyncGetFenceUnorderedWq(void)
-{
- if (!gpFenceUnorderedWq)
- {
-#if defined(DEBUG)
- PVR_ASSERT(gpFenceUnorderedWq);
-#endif
- return NULL;
- }
-
- return gpFenceUnorderedWq;
-}
#endif
PVRSRV_ERROR OSInitEnvData(void)
{-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
{-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
{-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+ {-EACCES, PVRSRV_ERROR_PMR_NOT_PERMITTED},
{-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
{0, PVRSRV_OK}
#endif
}
+IMG_BOOL OSIsMapPhysNonContigSupported(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && !defined(CONFIG_VMAP_PFN)
+ return IMG_FALSE;
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+ return IMG_FALSE;
+#else
+ return IMG_TRUE;
+#endif
+}
+
/* Unmaps a kernel linear mapping previously created by OSMapPhysArrayToLin().
 *
 * pvLinAddr  - kernel virtual address returned by OSMapPhysArrayToLin()
 * pvPrivData - private data returned by the same call: the vm_struct for the
 *              alloc_vm_area() path, NULL for the vmap_pfn() path.
 *
 * The unmap method is selected at compile time and must match the method
 * OSMapPhysArrayToLin() used to create the mapping.
 */
void OSUnMapPhysArrayToLin(void *pvLinAddr, void *pvPrivData)
{
	/* Only vmalloc-range addresses can have been produced by
	 * OSMapPhysArrayToLin(); anything else indicates a caller error. */
	if (is_vmalloc_addr(pvLinAddr))
	{
#if defined(CONFIG_VMAP_PFN)
		/* Mapping was created with vmap_pfn(); no private data is used. */
		PVR_UNREFERENCED_PARAMETER(pvPrivData);
		vunmap(pvLinAddr);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
		/* Mapping was created with alloc_vm_area(); tear down the PTEs and
		 * release the VM area handle carried in pvPrivData. */
		unmap_kernel_range((unsigned long) (uintptr_t) pvLinAddr,
		                   get_vm_area_size(pvPrivData));
		free_vm_area(pvPrivData);
#else
		/* No supported unmap method in this kernel configuration; should be
		 * unreachable when OSIsMapPhysNonContigSupported() returned false. */
		PVR_DPF((PVR_DBG_ERROR,"%s: Cannot map into kernel, no method supported.", __func__));
		PVR_ASSERT(0);
#endif
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Given kernel address is not a vmalloc addr", __func__));
	}
}
+
/* Converts a CPU physical address to its page frame number. */
#define PagePAToPFN(PageAddr) (PageAddr >> PAGE_SHIFT)

/* Maps an array of CPU physical page addresses into a single contiguous
 * kernel virtual range, for access to physically non-contiguous PMR pages.
 *
 * pPagePA      - array of uiPagesCount page-aligned CPU physical addresses.
 *                NOTE(review): on the CONFIG_VMAP_PFN path the entries are
 *                overwritten in place with PFNs — callers must not rely on
 *                the array contents afterwards; confirm callers tolerate this.
 * uiPagesCount - number of entries in pPagePA
 * ppvLinAddr   - out: kernel virtual address of the new mapping
 * ppvPrivData  - out: private data to pass to OSUnMapPhysArrayToLin()
 *                (vm_struct on the alloc_vm_area() path, NULL otherwise)
 *
 * Returns PVRSRV_OK on success, or a mapping/allocation error code.
 */
PVRSRV_ERROR OSMapPhysArrayToLin(IMG_CPU_PHYADDR pPagePA[],
                                 IMG_UINT32 uiPagesCount,
                                 void **ppvLinAddr,
                                 void **ppvPrivData)
{
	if (ppvLinAddr == NULL || ppvPrivData == NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#if defined(CONFIG_VMAP_PFN)
	{
		IMG_UINT32 i;

		/* vmap_pfn() consumes PFNs, so convert the physical addresses
		 * in place (destructive - see note above). */
		for (i = 0; i < uiPagesCount; i++)
		{
			pPagePA[i].uiAddr = PagePAToPFN(pPagePA[i].uiAddr);
		}

		*ppvLinAddr = vmap_pfn((unsigned long *)pPagePA,
		                       (unsigned int)uiPagesCount,
		                       pgprot_device(PAGE_KERNEL));
		if (NULL == *ppvLinAddr)
		{
			return PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
		}
		/* vmap_pfn() mappings need no private data for unmap. */
		*ppvPrivData = NULL;
		return PVRSRV_OK;
	}
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
	{
		/* Small on-stack PTE pointer array; fall back to a heap allocation
		 * when more than 32 pages are requested. */
		pte_t *pte[32], **pte_array;
		struct vm_struct *psVMA;
		PVRSRV_ERROR eError = PVRSRV_OK;
		IMG_UINT32 i = 0;

		pte_array = &pte[0];
		if (sizeof(pte) < (sizeof(pte[0]) * uiPagesCount))
		{
			pte_array = kzalloc(uiPagesCount * sizeof(*pte_array), GFP_KERNEL);
			if (NULL == pte_array)
			{
				return PVRSRV_ERROR_OUT_OF_MEMORY;
			}
		}

		/* Reserve a virtual range; alloc_vm_area() fills pte_array with
		 * pointers to the PTE slots backing that range. */
		psVMA = alloc_vm_area((size_t)(uiPagesCount << PAGE_SHIFT), pte_array);
		if (NULL == psVMA)
		{
			eError = PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY;
			goto FreePTEArray;
		}

		PVR_DPF((PVR_DBG_MESSAGE, "Allocated VM: %s VMA: %p Addr: %p Size: 0x%lx count: %d", __func__,
		         psVMA, psVMA->addr, psVMA->size, psVMA->nr_pages));

		/* Point each reserved PTE at the caller's physical page. */
		for (i = 0; i < uiPagesCount; i++)
		{
			*(pte_array[i]) = pte_mkspecial(pfn_pte((unsigned long) PagePAToPFN(pPagePA[i].uiAddr),
			                                pgprot_device(PAGE_KERNEL)));
		}

		/* Ensure the PTE writes are visible before the mapping is used. */
		OSWriteMemoryBarrier(psVMA->addr);

		*ppvLinAddr = psVMA->addr;
		/* The vm_struct is needed by OSUnMapPhysArrayToLin() to free the
		 * area again. */
		*ppvPrivData = psVMA;

FreePTEArray:
		/* The PTE pointer array is only a construction aid; free it on both
		 * the success and failure paths (no-op for the on-stack case). */
		if (pte_array != pte)
		{
			kfree(pte_array);
		}

		return eError;
	}
#else
	/* No supported mapping method in this kernel configuration.
	 * NOTE(review): *ppvPrivData is left unset on this path — callers
	 * should not read it after a failure return; confirm. */
	PVR_DPF((PVR_DBG_ERROR,"%s: Cannot map into kernel, no method supported.", __func__));
	PVR_ASSERT(0);
	*ppvLinAddr = NULL;
	return PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
#endif
}
+
void *
OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
size_t ui32Bytes,
res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
if (res == 0)
{
- PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+ PVR_LOG(("OSTimerCallbackWrapper: work already queued"));
}
}
PFN_SERVER_CLEANUP pfnServerCleanup;
void* pvServerCleanupData;
- enum dma_transfer_direction eDirection;
+ enum dma_data_direction eDirection;
struct sg_table **ppsSg;
struct page ***pages;
IMG_UINT32* puiNumPages;
/* Unpin pages */
for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
{
- if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
+ /*
+ * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM
+ * from enum dma_transfer_direction to avoid casting explicitly
+ */
+ if (psOSCleanup->eDirection == DMA_FROM_DEVICE)
{
set_page_dirty_lock(psOSCleanup->pages[i][j]);
}
*/
if (psOSCleanup->pages[i][j])
{
- if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
+ /*
+ * using DMA_FROM_DEVICE from enum dma_data_direction instead of DMA_DEV_TO_MEM
+ * from enum dma_transfer_direction to avoid casting explicitly
+ */
+ if (psOSCleanup->eDirection == DMA_FROM_DEVICE)
{
set_page_dirty_lock(psOSCleanup->pages[i][j]);
}
PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
OS_CLEANUP_DATA* psOSCleanupData = pvOSData;
+ enum dma_data_direction eDataDirection = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
struct dma_slave_config sConfig = {0};
struct dma_async_tx_descriptor *psDesc;
}
dmaengine_slave_config(pvChan, &sConfig);
- iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+ iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
if (!iRet)
{
PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__));
goto e4;
}
- dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction);
+ dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection);
psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0);
if (!psDesc)
goto e5;
}
- psOSCleanupData->eDirection = sConfig.direction;
+ psOSCleanupData->eDirection = eDataDirection;
psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg;
psOSCleanupData->pfnServerCleanup = pfnServerCleanup;
psOSCleanupData->pvServerCleanupData = pvServerCleanupParam;
return PVRSRV_OK;
e5:
- dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
e4:
sg_free_table(psSg);
e3:
IMG_UINT32 ui32Idx;
IMG_INT32 i32Rwd;
+ enum dma_data_direction eDataDirection = bMemToDev ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
struct dma_slave_config sConfig = {0};
struct dma_async_tx_descriptor *psDesc;
}
dmaengine_slave_config(pvChan, &sConfig);
- iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+ iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
if (!iRet)
{
PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__));
eError = PVRSRV_ERROR_INVALID_PARAMS;
goto e5;
}
- dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction);
+ dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, eDataDirection);
psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0);
if (!psDesc)
{
struct task_struct* t1;
- psOSCleanupData->eDirection = sConfig.direction;
+ psOSCleanupData->eDirection = eDataDirection;
psOSCleanupData->pfnServerCleanup = pfnServerCleanup;
psOSCleanupData->pvServerCleanupData = pvServerCleanupParam;
return PVRSRV_OK;
e6:
- dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
e5:
sg_free_table(psSg);
e4:
IMG_UINT32 i;
psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd];
- dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
+ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, eDataDirection);
sg_free_table(psSg);
/* Unpin pages */
}
#endif /* SUPPORT_DMA_TRANSFER */
+
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND)
+IMG_INTERNAL PVRSRV_ERROR
+OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszName,
+ PMR **ppsPMR)
+{
+ struct dma_heap *heap;
+ struct dma_buf *buf;
+ struct device *dev;
+ struct dma_buf_attachment *buf_attachment;
+
+ IMG_UINT32 ui32MappingTable = 0;
+ PVRSRV_ERROR eError;
+ IMG_CHAR *pszHeapName;
+
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode, "psDeviceNode");
+ PVR_LOG_RETURN_IF_INVALID_PARAM(psDeviceNode->psDevConfig->pszSecureDMAHeapName, "pszSecureDMAHeapName");
+ PVR_LOG_RETURN_IF_INVALID_PARAM((OSStringLength(psDeviceNode->psDevConfig->pszSecureDMAHeapName) > 0), "pszSecureDMAHeapName length");
+
+ pszHeapName = psDeviceNode->psDevConfig->pszSecureDMAHeapName;
+ dev = (struct device*)psDeviceNode->psDevConfig->pvOSDevice;
+
+ heap = dma_heap_find(pszHeapName);
+ PVR_LOG_GOTO_IF_NOMEM(heap, eError, ErrorExit);
+
+ buf = dma_heap_buffer_alloc(heap, uiSize, 0, 0);
+ PVR_LOG_GOTO_IF_NOMEM(buf, eError, ErrorBufPut);
+
+ if (buf->size < uiSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: buffer size (%ld) is less than requested (%lld).",
+ __func__, buf->size, uiSize));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorBufFree;
+ }
+
+ buf_attachment = dma_buf_attach(buf, dev);
+ PVR_LOG_GOTO_IF_NOMEM(buf_attachment, eError, ErrorBufFree);
+
+ eError = PhysmemCreateNewDmaBufBackedPMR(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL],
+ buf_attachment,
+ NULL,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ buf->size,
+ 1,
+ 1,
+ &ui32MappingTable,
+ OSStringLength(pszName),
+ pszName,
+ ppsPMR);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateNewDmaBufBackedPMR", ErrorBufDetach);
+
+ return PVRSRV_OK;
+
+ErrorBufDetach:
+ dma_buf_detach(buf, buf_attachment);
+ErrorBufFree:
+ dma_heap_buffer_free(buf);
+ErrorBufPut:
+ dma_buf_put(buf);
+ErrorExit:
+
+ return eError;
+}
+
/* Releases a secure buffer allocated by OSAllocateSecBuf() (DMA-heap path):
 * drops the dma-buf references and then unreferences the wrapping PMR.
 *
 * NOTE(review): dma_heap_buffer_free() is itself a dma_buf_put() on the
 * underlying buffer, so calling both here appears to drop the buffer
 * reference twice — confirm against the reference balance established by
 * dma_heap_buffer_alloc()/PhysmemCreateNewDmaBufBackedPMR() before relying
 * on this. */
IMG_INTERNAL void
OSFreeSecBuf(PMR *psPMR)
{
	struct dma_buf *buf = PhysmemGetDmaBuf(psPMR);
	dma_buf_put(buf);
	dma_heap_buffer_free(buf);

	PMRUnrefPMR(psPMR);
}
+#else /* PVR_ANDROID_HAS_DMA_HEAP_FIND */
/* Fallback secure-buffer allocator used when the DMA-heap interface is not
 * available: allocates uiSize bytes as a RAM-backed PMR hinted at the
 * GPU_SECURE physical heap.
 *
 * psDeviceNode - device the PMR is created for
 * uiSize       - requested size in bytes
 * pszName      - annotation recorded against the PMR
 * ppsPMR       - out: the created PMR
 *
 * Returns PVRSRV_OK on success; on failure nothing is left allocated.
 */
IMG_INTERNAL PVRSRV_ERROR
OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
                 IMG_DEVMEM_SIZE_T uiSize,
                 const IMG_CHAR *pszName,
                 PMR **ppsPMR)
{
	/* Single physical chunk mapped at virtual chunk 0. */
	IMG_UINT32 ui32MappingTable = 0;
	PVRSRV_ERROR eError;

	eError = PhysmemNewRamBackedPMR(NULL,
	                                psDeviceNode,
	                                uiSize,
	                                1,
	                                1,
	                                &ui32MappingTable,
	                                ExactLog2(OSGetPageSize()),
	                                PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_SECURE)
	                                | PVRSRV_MEMALLOCFLAG_GPU_READABLE
	                                | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
	                                OSStringLength(pszName),
	                                pszName,
	                                OSGetCurrentClientProcessIDKM(),
	                                ppsPMR,
	                                PDUMP_NONE,
	                                NULL);
	PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemNewRamBackedPMR", ErrorExit);

#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
	/* Attribute the allocation to the system PID in the RI debug records. */
	eError = RIWritePMREntryWithOwnerKM(*ppsPMR, PVR_SYS_ALLOC_PID);
	PVR_LOG_GOTO_IF_ERROR(eError, "RIWritePMREntryWithOwnerKM", ErrorUnrefPMR);
#endif

	return PVRSRV_OK;

	/* Error labels are conditionally compiled to match the gotos above. */
#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
ErrorUnrefPMR:
	PMRUnrefPMR(*ppsPMR);
#endif
ErrorExit:
	return eError;
}
+
/* Fallback counterpart to the RAM-backed OSAllocateSecBuf(): the PMR owns
 * all backing memory, so releasing the PMR reference is sufficient. */
IMG_INTERNAL void
OSFreeSecBuf(PMR *psPMR)
{
	PMRUnrefPMR(psPMR);
}
+#endif
+#endif /* SUPPORT_SECURE_ALLOC_KM */
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
#include <linux/version.h>
-#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+ #include <linux/dma-map-ops.h>
+#else
+ #include <linux/dma-mapping.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
#include <asm/system.h>
#endif
*/ /**************************************************************************/
#include <linux/version.h>
#include <linux/cpumask.h>
-#include <linux/dma-mapping.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
+ #include <linux/dma-map-ops.h>
+#else
+ #include <linux/dma-mapping.h>
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include "pvr_debug.h"
#include "cache_ops.h"
-
extern void SysDevHost_Cache_Maintenance(IMG_HANDLE hSysData,
- PVRSRV_CACHE_OP eRequestType,
- void *pvVirtStart,
- void *pvVirtEnd,
- IMG_CPU_PHYADDR sCPUPhysStart,
- IMG_CPU_PHYADDR sCPUPhysEnd);
+ PVRSRV_CACHE_OP eRequestType,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStar,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
void *pvVirtStart,
#if !defined(NO_HARDWARE)
else
{
- //PVR_DPF((PVR_DBG_ERROR,
- // "%s: System doesn't register cache maintenance flush. Skipping!",
+ //PVR_DPF((PVR_DBG_WARNING,
+ // "%s: System doesn't implement cache maintenance. Skipping!",
// __func__));
- SysDevHost_Cache_Maintenance(
- psDevNode->psDevConfig->hSysData,
- PVRSRV_CACHE_OP_FLUSH,
- pvVirtStart,
- pvVirtEnd,
- sCPUPhysStart,
- sCPUPhysEnd);
+ SysDevHost_Cache_Maintenance(
+ psDevNode->psDevConfig->hSysData,
+ PVRSRV_CACHE_OP_FLUSH,
+ pvVirtStart,
+ pvVirtEnd,
+ sCPUPhysStart,
+ sCPUPhysEnd);
}
#endif
#if !defined(NO_HARDWARE)
else
{
- //PVR_DPF((PVR_DBG_ERROR,
- // "%s: System doesn't register cache maintenance clean. Skipping!",
+ //PVR_DPF((PVR_DBG_WARNING,
+ // "%s: System doesn't implement cache maintenance. Skipping!",
// __func__));
- SysDevHost_Cache_Maintenance(
- psDevNode->psDevConfig->hSysData,
- PVRSRV_CACHE_OP_CLEAN,
- pvVirtStart,
- pvVirtEnd,
- sCPUPhysStart,
- sCPUPhysEnd);
+ SysDevHost_Cache_Maintenance(
+ psDevNode->psDevConfig->hSysData,
+ PVRSRV_CACHE_OP_CLEAN,
+ pvVirtStart,
+ pvVirtEnd,
+ sCPUPhysStart,
+ sCPUPhysEnd);
+
}
#endif
#if !defined(NO_HARDWARE)
else
{
- //PVR_DPF((PVR_DBG_ERROR,
- // "%s: System doesn't register cache maintenance invalid. Skipping!",
+ //PVR_DPF((PVR_DBG_WARNING,
+ // "%s: System doesn't implement cache maintenance. Skipping!",
// __func__));
- SysDevHost_Cache_Maintenance(
- psDevNode->psDevConfig->hSysData,
- PVRSRV_CACHE_OP_INVALIDATE,
- pvVirtStart,
- pvVirtEnd,
- sCPUPhysStart,
- sCPUPhysEnd);
+ SysDevHost_Cache_Maintenance(
+ psDevNode->psDevConfig->hSysData,
+ PVRSRV_CACHE_OP_INVALIDATE,
+ pvVirtStart,
+ pvVirtEnd,
+ sCPUPhysStart,
+ sCPUPhysEnd);
+
}
#endif
}
void OSUserModeAccessToPerfCountersEn(void)
{
-#if 0// !defined(NO_HARDWARE)
+#if 0//!defined(NO_HARDWARE)
PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__));
PVR_ASSERT(0);
#endif
/* end of dma_buf_ops */
+
typedef struct _PMR_DMA_BUF_DATA_
{
/* Filled in at PMR create time */
/* Start size of the g_psDmaBufHash hash table */
#define DMA_BUF_HASH_SIZE 20
-static DEFINE_MUTEX(g_HashLock);
+static DEFINE_MUTEX(g_FactoryLock);
static HASH_TABLE *g_psDmaBufHash;
static IMG_UINT32 g_ui32HashRefCount;
* PMR callback functions *
*****************************************************************************/
-static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+/* This function is protected by the pfn(Get/Release)PMRFactoryLock() lock
+ * acquired/released in _UnrefAndMaybeDestroy() in pmr.c. */
+static void PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
{
PMR_DMA_BUF_DATA *psPrivData = pvPriv;
struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
struct dma_buf *psDmaBuf = psAttachment->dmabuf;
struct sg_table *psSgTable = psPrivData->psSgTable;
PMR *psPMR;
- PVRSRV_ERROR eError = PVRSRV_OK;
if (psDmaBuf->ops != &sPVRDmaBufOps)
{
/* We have a hash table so check if we've seen this dmabuf before */
psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
- if (psPMR)
+ if (psPMR != NULL)
{
- if (!PMRIsPMRLive(psPMR))
+ HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ g_ui32HashRefCount--;
+
+ if (g_ui32HashRefCount == 0)
{
- HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
- g_ui32HashRefCount--;
-
- if (g_ui32HashRefCount == 0)
- {
- HASH_Delete(g_psDmaBufHash);
- g_psDmaBufHash = NULL;
- }
- }
- else{
- eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+ HASH_Delete(g_psDmaBufHash);
+ g_psDmaBufHash = NULL;
}
}
+
PVRSRVIonRemoveMemAllocRecord(psDmaBuf);
}
- }else
- {
- psPMR = (PMR *) psDmaBuf->priv;
- if (PMRIsPMRLive(psPMR))
- {
- eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
- }
-
- }
-
- if (PVRSRV_OK != eError)
- {
- return eError;
}
#if defined(PVRSRV_ENABLE_PROCESS_STATS)
dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
-
if (psPrivData->bPoisonOnFree)
{
- int err;
+ int err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE,
+ __func__);
+		PVR_LOG_IF_FALSE(err == 0, "Failed to poison allocation before free");
-		err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, __func__);
-		if (err)
-		{
-			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before "
-			         "free", __func__));
-			PVR_ASSERT(IMG_FALSE);
-		}
+		PVR_ASSERT(err == 0);
}
if (psPrivData->pfnDestroy)
{
- eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
- if (eError != PVRSRV_OK)
- {
- return eError;
- }
+ psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
}
OSFreeMem(psPrivData->pasDevPhysAddr);
OSFreeMem(psPrivData);
-
- return PVRSRV_OK;
}
static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
return PVRSRV_OK;
}
-static void PMRGetFactoryLock(void)
+static void PMRFactoryLock(void)
{
- mutex_lock(&g_HashLock);
+ mutex_lock(&g_FactoryLock);
}
-static void PMRReleaseFactoryLock(void)
+static void PMRFactoryUnlock(void)
{
- mutex_unlock(&g_HashLock);
+ mutex_unlock(&g_FactoryLock);
}
static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
.pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf,
.pfnMMap = PMRMMapDmaBuf,
.pfnFinalize = PMRFinalizeDmaBuf,
- .pfnGetPMRFactoryLock = PMRGetFactoryLock,
- .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock,
+ .pfnGetPMRFactoryLock = PMRFactoryLock,
+ .pfnReleasePMRFactoryLock = PMRFactoryUnlock,
};
/*****************************************************************************
goto errReturn;
}
+ if (!PMRValidateSize((IMG_UINT64) ui32NumVirtChunks * uiChunkSize))
+ {
+ PVR_LOG_VA(PVR_DBG_ERROR,
+ "PMR size exceeds limit #Chunks: %u ChunkSz %"IMG_UINT64_FMTSPECX"",
+ ui32NumVirtChunks,
+ uiChunkSize);
+ eError = PVRSRV_ERROR_PMR_TOO_LARGE;
+ goto errReturn;
+ }
+
psPrivData = OSAllocZMem(sizeof(*psPrivData));
if (psPrivData == NULL)
{
eError = PMRCreatePMR(psHeap,
ui32NumVirtChunks * uiChunkSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
return eError;
}
-static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
- struct dma_buf_attachment *psAttachment)
+static void PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment)
{
struct dma_buf *psDmaBuf = psAttachment->dmabuf;
dma_buf_detach(psDmaBuf, psAttachment);
dma_buf_put(psDmaBuf);
-
- return PVRSRV_OK;
}
struct dma_buf *
PVRSRV_ERROR eError;
IMG_INT iFd;
- mutex_lock(&g_HashLock);
+ PMRFactoryLock();
PMRRefPMR(psPMR);
goto fail_dma_buf;
}
- mutex_unlock(&g_HashLock);
+ PMRFactoryUnlock();
*piFd = iFd;
/* A PMR memory lay out can't change once exported
dma_buf_put(psDmaBuf);
fail_pmr_ref:
- mutex_unlock(&g_HashLock);
+ PMRFactoryUnlock();
PMRUnrefPMR(psPMR);
PVR_ASSERT(eError != PVRSRV_OK);
PVR_UNREFERENCED_PARAMETER(psConnection);
- if (!psDevNode)
- {
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto errReturn;
- }
+ PVR_GOTO_IF_INVALID_PARAM(psDevNode != NULL, eError, errReturn);
/* Terminate string from bridge to prevent corrupt annotations in RI */
if (pszName != NULL)
pszName0[ui32NameSize-1] = '\0';
}
- mutex_lock(&g_HashLock);
+ PMRFactoryLock();
/* Get the buffer handle */
psDmaBuf = dma_buf_get(fd);
if (psDmaBuf->ops == &sPVRDmaBufOps)
{
- PVRSRV_DEVICE_NODE *psPMRDevNode;
-
- /* We exported this dma_buf, so we can just get its PMR */
- psPMR = (PMR *) psDmaBuf->priv;
+ /* We exported this dma_buf, so we can just get its PMR. */
+ psPMR = psDmaBuf->priv;
- /* However, we can't import it if it belongs to a different device */
- psPMRDevNode = PMR_DeviceNode(psPMR);
- if (psPMRDevNode != psDevNode)
+ /* However, we can't import it if it belongs to a different device. */
+ if (PMR_DeviceNode(psPMR) != psDevNode)
{
PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device",
__func__));
- eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
- goto err;
+ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, err);
}
}
else
{
- if (g_psDmaBufHash)
+ if (g_psDmaBufHash != NULL)
{
- /* We have a hash table so check if we've seen this dmabuf before */
+ /* We have a hash table so check if we've seen this dmabuf
+ * before. */
psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
}
else
{
- /*
- * As different processes may import the same dmabuf we need to
+ /* As different processes may import the same dmabuf we need to
* create a hash table so we don't generate a duplicate PMR but
- * rather just take a reference on an existing one.
- */
+ * rather just take a reference on an existing one. */
g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
- if (!g_psDmaBufHash)
- {
- eError = PVRSRV_ERROR_OUT_OF_MEMORY;
- goto err;
- }
+ PVR_GOTO_IF_NOMEM(g_psDmaBufHash, eError, err);
+
bHashTableCreated = IMG_TRUE;
}
}
- if (psPMR)
+ if (psPMR != NULL)
{
/* Reuse the PMR we already created */
PMRRefPMR(psPMR);
PMR_LogicalSize(psPMR, puiSize);
*puiAlign = PAGE_SIZE;
}
+
/* No errors so far */
eError = PVRSRV_OK;
err:
- if (psPMR || (PVRSRV_OK != eError))
+ if (psPMR != NULL || eError != PVRSRV_OK)
{
- mutex_unlock(&g_HashLock);
+ PMRFactoryUnlock();
dma_buf_put(psDmaBuf);
if (PVRSRV_OK == eError)
{
- /*
- * We expect a PMR to be immutable at this point
+ /* We expect a PMR to be immutable at this point.
* But its explicitly set here to cover a corner case
* where a PMR created through non-DMA interface could be
- * imported back again through DMA interface */
+ * imported back again through DMA interface. */
PMR_SetLayoutFixed(psPMR, IMG_TRUE);
}
+
return eError;
}
HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
g_ui32HashRefCount++;
- mutex_unlock(&g_HashLock);
+ PMRFactoryUnlock();
PVRSRVIonAddMemAllocRecord(psDmaBuf);
dma_buf_detach(psDmaBuf, psAttachment);
errUnlockAndDMAPut:
- if (IMG_TRUE == bHashTableCreated)
+ if (bHashTableCreated)
{
HASH_Delete(g_psDmaBufHash);
g_psDmaBufHash = NULL;
dma_buf_put(psDmaBuf);
errUnlockReturn:
- mutex_unlock(&g_HashLock);
+ PMRFactoryUnlock();
errReturn:
PVR_ASSERT(eError != PVRSRV_OK);
#include "devicemem_server_utils.h"
#include "pvr_vmap.h"
#include "physheap.h"
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+#include "physmem_cpumap_history.h"
+#endif
/* ourselves */
#include "physmem_osmem.h"
#define FLAG_POISON_ON_FREE (1U)
#define FLAG_POISON_ON_ALLOC (2U)
#define FLAG_ONDEMAND (3U)
-#define FLAG_UNPINNED (4U)
+
#define FLAG_IS_CMA (5U)
#define FLAG_UNSET_MEMORY_TYPE (6U)
* Poison on free - Should we Poison the memory on free.
* Poison on alloc - Should we Poison the memory on alloc.
* On demand - Is the allocation on Demand i.e Do we defer allocation to time of use.
- * Unpinned - Should be protected by page pool lock
* CMA - Is CMA memory allocated via DMA framework
* Unset Memory Type - Upon free do we need to revert the cache type before return to OS
* */
* the additional pages allocated are tracked through this additional
* variable and are accounted for in the memory statistics */
IMG_UINT32 ui32CMAAdjustedPageCount;
+
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ /*
+ Handle on the parent PMR
+ */
+ void *hPMR;
+#endif
+
} PMR_OSPAGEARRAY_DATA;
/***********************************
LinuxPagePoolEntry *psPoolEntry;
} LinuxCleanupData;
-/* A struct for the unpinned items */
-typedef struct
-{
- struct list_head sUnpinPoolItem;
- PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
-} LinuxUnpinEntry;
-
/* Caches to hold page pool and page array structures */
static struct kmem_cache *g_psLinuxPagePoolCache;
* x86 needs two page pools because we have to change the memory attributes
* of the pages which is expensive due to an implicit flush.
* See set_pages_array_uc/wc/wb. */
-static IMG_UINT32 g_ui32UnpinPageCount;
static IMG_UINT32 g_ui32PagePoolUCCount;
#if defined(CONFIG_X86)
static IMG_UINT32 g_ui32PagePoolWCCount;
/* List holding the page array pointers: */
static LIST_HEAD(g_sPagePoolList_WC);
static LIST_HEAD(g_sPagePoolList_UC);
-static LIST_HEAD(g_sUnpinList);
#if defined(DEBUG) && defined(SUPPORT_VALIDATION)
/* Global structure to manage GPU memory leak */
static IMG_UINT32 g_ui32UMALeakCounter = 0;
#endif
+static IMG_BOOL g_bInitialisedOnAlloc = IMG_FALSE;
+
+static inline IMG_BOOL
+_ShouldInitMem(IMG_UINT32 ui32AllocFlags)
+{
+ return BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && !g_bInitialisedOnAlloc;
+}
+
static inline IMG_UINT32
_PagesInPoolUnlocked(void)
{
mutex_unlock(&g_sPagePoolMutex);
}
-static PVRSRV_ERROR
-_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
-{
- LinuxUnpinEntry *psUnpinEntry;
-
- psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
- if (!psUnpinEntry)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: OSAllocMem failed. Cannot add entry to unpin list.",
- __func__));
- return PVRSRV_ERROR_OUT_OF_MEMORY;
- }
-
- psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
-
- /* Add into pool that the shrinker can access easily*/
- list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
-
- g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated;
-
- return PVRSRV_OK;
-}
-
-static void
-_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
-{
- LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
-
- /* Remove from pool */
- list_for_each_entry_safe(psUnpinEntry,
- psTempUnpinEntry,
- &g_sUnpinList,
- sUnpinPoolItem)
- {
- if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
- {
- list_del(&psUnpinEntry->sUnpinPoolItem);
- break;
- }
- }
-
- OSFreeMem(psUnpinEntry);
-
- g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated;
-}
-
static inline IMG_BOOL
_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
struct list_head **ppsPoolHead,
static unsigned long
_GetNumberOfPagesInPoolUnlocked(void)
{
- return _PagesInPoolUnlocked() + g_ui32UnpinPageCount;
+ return _PagesInPoolUnlocked();
}
/* Linux shrinker function that informs the OS about how many pages we are caching and
_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
{
unsigned long uNumToScan = psShrinkControl->nr_to_scan;
- unsigned long uSurplus = 0;
- LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
IMG_UINT32 uiPagesFreed;
PVR_ASSERT(psShrinker == &g_sShrinker);
&uiPagesFreed);
uNumToScan -= uiPagesFreed;
- if (uNumToScan == 0)
- {
- goto e_exit;
- }
-
- /* Free unpinned memory, starting with LRU entries */
- list_for_each_entry_safe(psUnpinEntry,
- psTempUnpinEntry,
- &g_sUnpinList,
- sUnpinPoolItem)
- {
- PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
- IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated)?
- psPageArrayDataPtr->iNumOSPagesAllocated:psPageArrayDataPtr->uiTotalNumOSPages;
- PVRSRV_ERROR eError;
-
- /* Free associated pages */
- eError = _FreeOSPages(psPageArrayDataPtr,
- NULL,
- 0);
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
- __func__,
- PVRSRVGetErrorString(eError),
- eError));
- goto e_exit;
- }
-
- /* Remove item from pool */
- list_del(&psUnpinEntry->sUnpinPoolItem);
-
- g_ui32UnpinPageCount -= uiNumPages;
-
- /* Check if there is more to free or if we already surpassed the limit */
- if (uiNumPages < uNumToScan)
- {
- uNumToScan -= uiNumPages;
-
- }
- else if (uiNumPages > uNumToScan)
- {
- uSurplus += uiNumPages - uNumToScan;
- uNumToScan = 0;
- goto e_exit;
- }
- else
- {
- uNumToScan -= uiNumPages;
- goto e_exit;
- }
- }
-
-e_exit:
- if (list_empty(&g_sUnpinList))
- {
- PVR_ASSERT(g_ui32UnpinPageCount == 0);
- }
-
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
{
int remain;
#else
/* Returning the number of pages freed during the scan */
_PagePoolUnlock();
- return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+ return psShrinkControl->nr_to_scan - uNumToScan;
#endif
}
if (g_psLinuxPagePoolCache)
{
/* Only create the shrinker if we created the cache OK */
- register_shrinker(&g_sShrinker, NULL);
+ register_shrinker(&g_sShrinker, "pvr-pp");
}
OSAtomicWrite(&g_iPoolCleanTasks, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,3,0))
+/* Check both config and modparam setting */
+#if PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC == 1
+ g_bInitialisedOnAlloc = want_init_on_alloc(0x0);
+
+/* Assume modparam setting not in use on system */
+#elif PVRSRV_USE_LINUX_CONFIG_INIT_ON_ALLOC == 2
+# if defined(CONFIG_INIT_ON_ALLOC_DEFAULT_ON)
+ g_bInitialisedOnAlloc = IMG_TRUE;
+# else
+ g_bInitialisedOnAlloc = IMG_FALSE;
+# endif
+
+/* Ignore both config and modparam settings */
+#else
+ g_bInitialisedOnAlloc = IMG_FALSE;
+#endif
+#endif
}
/* Unregister the shrinker and remove all pages from the pool that are still left */
_MemsetPageArray(IMG_UINT32 uiNumToClean,
struct page **ppsCleanArray,
pgprot_t pgprot,
- IMG_UINT8 ui8Pattern)
+ IMG_UINT8 ui8Pattern, int rv_cache)
{
IMG_CPU_VIRTADDR pvAddr;
IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
{
IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap);
- pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+ if (rv_cache) {
+ pvAddr = pvr_vmap_cached(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+ } else {
+ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+ }
if (!pvAddr)
{
if (uiMaxPagesToMap <= 1)
OSCachedMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean);
}
pvr_vunmap(pvAddr, uiToClean, pgprot);
+
ppsCleanArray = &(ppsCleanArray[uiToClean]);
uiNumToClean -= uiToClean;
}
* at a time. */
eError = _MemsetPageArray(psPagePoolEntry->uiItemsRemaining,
psPagePoolEntry->ppsPageArray,
- pgprot, PVRSRV_ZERO_VALUE);
+ pgprot, PVRSRV_ZERO_VALUE, 0);
if (eError != PVRSRV_OK)
{
goto eExit;
static inline IMG_BOOL
_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
struct page **ppsPageArray,
- IMG_BOOL bUnpinned,
IMG_UINT32 uiOrder,
IMG_UINT32 uiNumPages)
{
#endif
if (uiOrder == 0 &&
- !bUnpinned &&
!PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
{
IMG_UINT32 uiEntries;
/* Allocate and initialise the structure to hold the metadata of the allocation */
static PVRSRV_ERROR
_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
- PMR_SIZE_T uiChunkSize,
+ PMR_SIZE_T uiSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 uiLog2AllocPageSize,
PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
{
PVRSRV_ERROR eError;
- PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
IMG_UINT32 uiNumOSPageSizeVirtPages;
IMG_UINT32 uiNumDevPageSizeVirtPages;
PMR_OSPAGEARRAY_DATA *psPageArrayData;
IMG_CPU_PHYADDR sUnused =
{ IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
- pvAddr = pvr_vmap(ppsCleanArray, uiToClean, -1, pgprot);
+ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_MAP, pgprot);
if (!pvAddr)
{
PVR_DPF((PVR_DBG_ERROR,
PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
NULL, sCPUPhysAddr,
1 << psPageArrayData->uiLog2AllocPageSize,
- NULL, psPageArrayData->uiPid
+ psPageArrayData->uiPid
DEBUG_MEMSTATS_VALUES);
}
IMG_UINT32 uiOSPagesToAlloc;
IMG_UINT32 uiDevPagesFromPool = 0;
- gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) : IMG_FALSE, /* Zero all pages later as batch */
+ gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? _ShouldInitMem(psPageArrayData->ui32AllocFlags) : IMG_FALSE, /* Zero all pages later as batch */
psPageArrayData->psDevNode);
gfp_t ui32GfpFlags;
gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
/* Try to get pages from the pool since it is faster;
the page pool currently only supports zero-order pages
- thus currently excludes all DMA/CMA allocated memory */
+ thus currently excludes all DMA/CMA allocated memory.
+ _ShouldInitMem() must not be used for bZero argument since it only
+ applies to new pages allocated from the kernel. */
_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
psPageArrayData->ui32CPUCacheFlags,
uiOSPagesToAlloc,
}
}
- if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) && ui32MinOrder == 0)
+ if (_ShouldInitMem(psPageArrayData->ui32AllocFlags) && ui32MinOrder == 0)
{
eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool,
ppsPageAttributeArray, PAGE_KERNEL,
- PVRSRV_ZERO_VALUE);
+ PVRSRV_ZERO_VALUE, 1);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)"));
* can point to different allocations: first for pages obtained from
* the pool and then the remaining pages */
eError = _MemsetPageArray(uiDevPagesFromPool, ppsPageArray, PAGE_KERNEL,
- PVRSRV_POISON_ON_ALLOC_VALUE);
+ PVRSRV_POISON_ON_ALLOC_VALUE, 1);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)"));
}
eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool,
ppsPageAttributeArray, PAGE_KERNEL,
- PVRSRV_POISON_ON_ALLOC_VALUE);
+ PVRSRV_POISON_ON_ALLOC_VALUE, 1);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)"));
IMG_UINT32 uiOSPagesToAlloc = uiDevPagesToAlloc * (1 << uiOrder);
IMG_UINT32 uiDevPagesAllocated = psPageArrayData->uiTotalNumOSPages >> uiOrder;
const IMG_UINT32 ui32AllocFlags = psPageArrayData->ui32AllocFlags;
- gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? BIT_ISSET(ui32AllocFlags, FLAG_ZERO):
- IMG_FALSE, /* Zero pages later as batch */
+ gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? _ShouldInitMem(ui32AllocFlags) : IMG_FALSE, /* Zero pages later as batch */
psPageArrayData->psDevNode);
/* We use this page array to receive pages from the pool and then reuse it afterwards to
* allocated only if:
* - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 1 && uiOrder == 0
* - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 0 && uiOrder == 0 &&
- * !BIT_ISSET(ui32AllocFlags, FLAG_ZERO) */
+ * !(BIT_ISSET(ui32AllocFlags, FLAG_ZERO))
+ * _ShouldInitMem() must not be used for bZero argument since it only
+ * applies to new pages allocated from the kernel. */
_GetPagesFromPoolLocked(psPageArrayData->psDevNode,
psPageArrayData->ui32CPUCacheFlags,
uiDevPagesToAlloc,
}
}
- if (BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && uiOrder == 0)
+ if (_ShouldInitMem(ui32AllocFlags) && uiOrder == 0)
{
/* At this point this array contains pages allocated from the page pool at its start
* and pages allocated from the OS after that.
* zeroed or we didn't allocate any of them. */
eError = _MemsetPageArray(uiTempPageArrayIndex - uiDevPagesFromPool,
&ppsTempPageArray[uiDevPagesFromPool],
- PAGE_KERNEL, PVRSRV_ZERO_VALUE);
+ PAGE_KERNEL, PVRSRV_ZERO_VALUE, 0);
PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to zero pages (sparse)", e_free_pages);
}
else if (BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC))
/* Here we need to poison all of the pages regardless if they were
* allocated from the pool or from the system. */
eError = _MemsetPageArray(uiTempPageArrayIndex, ppsTempPageArray,
- PAGE_KERNEL, PVRSRV_POISON_ON_ALLOC_VALUE);
+ PAGE_KERNEL, PVRSRV_POISON_ON_ALLOC_VALUE, 0);
PVR_LOG_IF_FALSE(eError == PVRSRV_OK, "failed to poison pages (sparse)");
/* We need to flush the cache for the poisoned pool pages here. The flush for the pages
}
/* Free the struct holding the metadata */
-static PVRSRV_ERROR
+static void
_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
{
PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
}
kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
-
- return PVRSRV_OK;
}
/* Free all or some pages from a sparse page array */
psPageArrayData->ui32CPUCacheFlags,
PVRSRV_POISON_ON_FREE_VALUE);
}
+ else if (pai32FreeIndices != NULL)
+ {
+ /* Attempt to poison an index not containing a valid page */
+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK;
+ }
}
}
ppsPageArray[idx] = NULL;
uiTempIdx++;
}
+ else if (pai32FreeIndices != NULL)
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Attempt to keep memstats consistent in event of fail as we have
+ * freed some pages
+ */
+ uiTempIdx <<= uiOrder;
+ _DecrMemAllocStat_UmaPages(uiTempIdx * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+ /* Attempt to free an already free index, could be duplicated free indices */
+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK;
+ }
}
uiTempIdx <<= uiOrder;
}
ppsPageArray[uiPageIndex] = NULL;
}
+ else if (pai32FreeIndices != NULL)
+ {
+ /* Attempt to free an already free index, could be duplicated free indices.
+ * We don't have a need to unwind here as this isn't something we want to
+ * recover from, we do want to try and maintain some consistency with pages we
+ * can free before the error occurred and adjusting the memstats as required.
+ */
+ for (i = 0; i < uiTempIdx; i++)
+ {
+ __free_pages(ppsTempPageArray[i], 0);
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ _DecrMemAllocStat_UmaPages(uiTempIdx * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+
+ OSFreeMemNoStats(ppsTempPageArray);
+ return PVRSRV_ERROR_PMR_FREE_INVALID_CHUNK;
+ }
}
/* Try to move the temp page array to the pool */
bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
ppsTempPageArray,
- BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED),
0,
uiTempIdx);
if (bSuccess)
/* Try to move the page array to the pool */
bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
ppsPageArray,
- BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED),
uiOrder,
uiNumPages);
if (bSuccess)
/* Destruction function is called after last reference disappears,
* but before PMR itself is freed.
*/
-static PVRSRV_ERROR
+static void
PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
{
PVRSRV_ERROR eError;
mutex_unlock(&g_sUMALeakMutex);
PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv));
- return PVRSRV_OK;
+ return;
}
mutex_unlock(&g_sUMALeakMutex);
#endif
- _PagePoolLock();
- if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED))
- {
- _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
- }
- _PagePoolUnlock();
- eError = _FreeOSPages(psOSPageArrayData,
- NULL,
- 0);
+ eError = _FreeOSPages(psOSPageArrayData, NULL, 0);
+ PVR_LOG_IF_ERROR(eError, "_FreeOSPages");
PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
}
- eError = _FreeOSPagesArray(psOSPageArrayData);
- PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
- return PVRSRV_OK;
+ _FreeOSPagesArray(psOSPageArrayData);
}
/* Callback function for locking the system physical page addresses.
void *pvBase;
IMG_UINT32 ui32PageCount;
pgprot_t PageProps;
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ IMG_UINT32 ui32CpuCacheFlags;
+#endif
} PMR_OSPAGEARRAY_KERNMAP_DATA;
static PVRSRV_ERROR
struct page **pagearray;
PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+ int riscv_cache = 0;
+
/* For cases device page size greater than the OS page size,
* multiple physically contiguous OS pages constitute one device page.
* However only the first page address of such an ensemble is stored
break;
case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ riscv_cache = 1;
break;
default:
goto e1;
}
- pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot);
+ if (riscv_cache) {
+ pvAddress = pvr_vmap_cached(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot);
+ } else {
+ pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot);
+ }
if (pvAddress == NULL)
{
eError = PVRSRV_ERROR_OUT_OF_MEMORY;
OSFreeMem(pagearray);
}
- return PVRSRV_OK;
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ {
+ IMG_CPU_PHYADDR pvAddrPhy;
+ pvAddrPhy.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(*pagearray));
+ InsertMappingRecord(PMR_GetAnnotation(psOSPageArrayData->hPMR),
+ psOSPageArrayData->uiPid,
+ pvAddress,
+ pvAddrPhy,
+ psOSPageArrayData->ui32CPUCacheFlags,
+ uiMapOffset,
+ ui32PageCount);
+
+ psData->ui32CpuCacheFlags = psOSPageArrayData->ui32CPUCacheFlags;
+ }
+#endif
+ return PVRSRV_OK;
/*
error exit paths follow
*/
PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
PVR_UNREFERENCED_PARAMETER(pvPriv);
- pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps);
- OSFreeMem(psData);
-}
-
-static
-PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
-{
- PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
- PVRSRV_ERROR eError = PVRSRV_OK;
-
- /* Lock down the pool and add the array to the unpin list */
- _PagePoolLock();
-
- /* Check current state */
- PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED) == IMG_FALSE);
- PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND) == IMG_FALSE);
-
- eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
-
- if (eError != PVRSRV_OK)
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
{
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Unable to add allocation to unpinned list (%d).",
- __func__,
- eError));
-
- goto e_exit;
+ IMG_CPU_PHYADDR pvAddrPhy;
+ pvAddrPhy.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(vmalloc_to_page(psData->pvBase)));
+ InsertUnMappingRecord(psData->pvBase,
+ pvAddrPhy,
+ psData->ui32CpuCacheFlags,
+ psData->ui32PageCount);
}
+#endif
- /* Set the Unpinned bit */
- BIT_SET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED);
-
-e_exit:
- _PagePoolUnlock();
- return eError;
-}
-
-static
-PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
- PMR_MAPPING_TABLE *psMappingTable)
-{
- PVRSRV_ERROR eError;
- PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
- IMG_UINT32 *pui32MapTable = NULL;
- IMG_UINT32 i, j = 0, ui32Temp = 0;
-
- _PagePoolLock();
-
- /* Check current state */
- PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED));
-
- /* Clear unpinned bit */
- BIT_UNSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED);
-
- /* If there are still pages in the array remove entries from the pool */
- if (psOSPageArrayData->iNumOSPagesAllocated != 0)
- {
- _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
- _PagePoolUnlock();
-
- eError = PVRSRV_OK;
- goto e_exit_mapalloc_failure;
- }
- _PagePoolUnlock();
-
- /* If pages were reclaimed we allocate new ones and
- * return PVRSRV_ERROR_PMR_NEW_MEMORY */
- if (psMappingTable->ui32NumVirtChunks == 1)
- {
- eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
- }
- else
- {
- pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
- if (NULL == pui32MapTable)
- {
- eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Unable to Alloc Map Table.",
- __func__));
- goto e_exit_mapalloc_failure;
- }
-
- for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++)
- {
- ui32Temp = psMappingTable->aui32Translation[i];
- if (TRANSLATION_INVALID != ui32Temp)
- {
- pui32MapTable[j++] = ui32Temp;
- }
- }
- eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
- }
-
- if (eError != PVRSRV_OK)
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Unable to get new pages for unpinned allocation.",
- __func__));
-
- eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
- goto e_exit;
- }
-
- PVR_DPF((PVR_DBG_MESSAGE,
- "%s: Allocating new pages for unpinned allocation. "
- "Old content is lost!",
- __func__));
-
- eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
-
-e_exit:
- OSFreeMem(pui32MapTable);
-e_exit_mapalloc_failure:
- return eError;
+ pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps);
+ OSFreeMem(psData);
}
/*************************************************************************/ /*!
.pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
.pfnReadBytes = NULL,
.pfnWriteBytes = NULL,
- .pfnUnpinMem = &PMRUnpinOSMem,
- .pfnPinMem = &PMRPinOSMem,
.pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
.pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
.pfnFinalize = &PMRFinalizeOSMem,
IMG_UINT32 *puiAllocIndices,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32Log2AllocPageSize)
{
PVRSRV_ERROR eError = PVRSRV_OK;
}
else if (ui32NumPhysChunks != 0)
{
- /* Calculate the number of pages we want to allocate */
- IMG_UINT32 ui32PagesToAlloc =
- (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> ui32Log2AllocPageSize) + 1);
-
- /* Make sure calculation is correct */
- PVR_ASSERT(((PMR_SIZE_T) ui32PagesToAlloc << ui32Log2AllocPageSize) ==
- (ui32NumPhysChunks * uiChunkSize));
-
/* Allocate the physical pages */
eError = _AllocOSPages(psPrivData, puiAllocIndices,
- ui32PagesToAlloc);
+ ui32NumPhysChunks);
}
return eError;
PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap,
CONNECTION_DATA *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *puiAllocIndices,
/* Create Array structure that hold the physical pages */
eError = _AllocOSPageArray(psDevNode,
- uiChunkSize,
+ uiSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
uiLog2AllocPageSize,
if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND))
{
eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks,
- ui32NumVirtChunks, uiChunkSize, uiLog2AllocPageSize);
+ ui32NumVirtChunks, uiLog2AllocPageSize);
if (eError != PVRSRV_OK)
{
goto errorOnAllocPages;
eError = PMRCreatePMR(psPhysHeap,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
puiAllocIndices,
goto errorOnCreate;
}
+#if defined(PVRSRV_PHYSMEM_CPUMAP_HISTORY)
+ psPrivData->hPMR = psPMR;
+#endif
+
*ppsPMRPtr = psPMR;
return PVRSRV_OK;
}
errorOnAllocPages:
- eError2 = _FreeOSPagesArray(psPrivData);
- PVR_ASSERT(eError2 == PVRSRV_OK);
+ _FreeOSPagesArray(psPrivData);
errorOnAllocPageArray:
errorOnParam:
psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem");
- psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_CREATED;
psDeviceNode->psDevConfig = psDevConfig;
psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
/* Initialise Phys mem heaps */
- eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig);
- PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit);
+ eError = PhysHeapInitDeviceHeaps(psDeviceNode, psDevConfig);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapInitDeviceHeaps", ErrorSysDevDeInit);
*ppsDeviceNode = psDeviceNode;
PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode)
{
/* Deinitialise Phys mem heaps */
- PVRSRVPhysMemHeapsDeinit(psDeviceNode);
+ PhysHeapDeInitDeviceHeaps(psDeviceNode);
OSFreeMem(psDeviceNode);
}
+static PVRSRV_ERROR
+PMRContiguousSparseMappingTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
+{
+ PVRSRV_ERROR eError, eError1;
+ PHYS_HEAP *psHeap;
+ PHYS_HEAP_POLICY psHeapPolicy;
+
+ PMR *psPMR = NULL;
+ PMR *psSpacingPMR = NULL, *psSecondSpacingPMR = NULL;
+ IMG_UINT32 aui32MappingTableFirstAlloc[4] = {0,1,2,3};
+ IMG_UINT32 aui32MappingTableSecondAlloc[8] = {4,5,6,7,8,9,10,11};
+ IMG_UINT32 aui32MappingTableThirdAlloc[4] = {12,13,14,15};
+ IMG_UINT32 ui32NoMappingTable = 0;
+ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer;
+ IMG_BOOL *pbValid;
+ IMG_DEV_PHYADDR *apsDevPAddr;
+ IMG_UINT32 ui32NumOfPages = 16;
+ size_t uiMappedSize, uiPageSize;
+ IMG_UINT32 i, uiAttempts;
+ IMG_HANDLE hPrivData = NULL;
+ void *pvKernAddr = NULL;
+
+ eError = PhysHeapAcquireByID(PVRSRV_GET_PHYS_HEAP_HINT(uiFlags),
+ psDeviceNode,
+ &psHeap);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByID", ErrorReturn);
+
+ psHeapPolicy = PhysHeapGetPolicy(psHeap);
+
+ PhysHeapRelease(psHeap);
+
+ /* If this is the case then it's not supported and so don't attempt the test */
+ if (psHeapPolicy != PHYS_HEAP_POLICY_ALLOC_ALLOW_NONCONTIG)
+ {
+ return PVRSRV_OK;
+ }
+
+ uiPageSize = OSGetPageSize();
+
+ /* Allocate OS memory for PMR page list */
+ apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+ PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem");
+
+ /* Allocate OS memory for PMR page state */
+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem);
+
+ /* Allocate OS memory for write buffer */
+ pcWriteBuffer = OSAllocMem(uiPageSize * ui32NumOfPages);
+ PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem);
+ OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize);
+
+ /* Allocate OS memory for read buffer */
+ pcReadBuffer = OSAllocMem(uiPageSize * ui32NumOfPages);
+ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer);
+
+ /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */
+ uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC;
+
+ /*
+ * Construct a sparse PMR attempting to ensure the allocations
+ * are physically non contiguous but sequentially placed in the mapping
+ * table.
+ */
+ for (uiAttempts = 3; uiAttempts > 0; uiAttempts--)
+ {
+ /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */
+ eError = PhysmemNewRamBackedPMR(NULL,
+ psDeviceNode,
+ ui32NumOfPages * uiPageSize,
+ 4,
+ ui32NumOfPages,
+ aui32MappingTableFirstAlloc,
+ OSGetPageShift(),
+ uiFlags,
+ sizeof("PMRContiguousSparseMappingTest"),
+ "PMRContiguousSparseMappingTest",
+ OSGetCurrentClientProcessIDKM(),
+ &psPMR,
+ PDUMP_NONE,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR"));
+ goto ErrorFreeReadBuffer;
+ }
+
+ /* Allocate some memory from the same physheap so that we can ensure
+ * the allocations aren't linear
+ */
+ eError = PhysmemNewRamBackedPMR(NULL,
+ psDeviceNode,
+ ui32NumOfPages * uiPageSize,
+ 1,
+ 1,
+ &ui32NoMappingTable,
+ OSGetPageShift(),
+ uiFlags,
+ sizeof("PMRContiguousSparseMappingTest"),
+ "PMRContiguousSparseMappingTest",
+ OSGetCurrentClientProcessIDKM(),
+ &psSpacingPMR,
+ PDUMP_NONE,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR"));
+ goto ErrorUnrefPMR;
+ }
+
+ /* Allocate 8 more physical pages on the Sparse PMR */
+ eError = PMR_ChangeSparseMem(psPMR,
+ 8,
+ aui32MappingTableSecondAlloc,
+ 0,
+ NULL,
+ uiFlags | SPARSE_RESIZE_ALLOC);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSpacingPMR);
+
+ /* Allocate some more memory from the same physheap so that we can ensure
+ * the allocations aren't linear
+ */
+ eError = PhysmemNewRamBackedPMR(NULL,
+ psDeviceNode,
+ ui32NumOfPages * uiPageSize,
+ 1,
+ 1,
+ &ui32NoMappingTable,
+ OSGetPageShift(),
+ uiFlags,
+ sizeof("PMRContiguousSparseMappingTest"),
+ "PMRContiguousSparseMappingTest",
+ OSGetCurrentClientProcessIDKM(),
+ &psSecondSpacingPMR,
+ PDUMP_NONE,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR"));
+ goto ErrorUnrefSpacingPMR;
+ }
+
+ /* Allocate final 4 physical pages on the Sparse PMR */
+ eError = PMR_ChangeSparseMem(psPMR,
+ 4,
+ aui32MappingTableThirdAlloc,
+ 0,
+ NULL,
+ uiFlags | SPARSE_RESIZE_ALLOC);
+ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_ChangeSparseMem", ErrorUnrefSecondSpacingPMR);
+
+ /*
+ * Check we have in fact managed to obtain a PMR with non contiguous
+ * physical pages.
+ */
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR"));
+ goto ErrorUnrefSecondSpacingPMR;
+ }
+
+ /* Get the Device physical addresses of the pages */
+ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+ goto ErrorUnlockPhysAddresses;
+ }
+
+ {
+ IMG_BOOL bPhysicallyContiguous = IMG_TRUE;
+ IMG_DEV_PHYADDR sPrevDevPAddr = apsDevPAddr[0];
+ for (i = 1; i < ui32NumOfPages && bPhysicallyContiguous; i++)
+ {
+ if (apsDevPAddr[i].uiAddr != sPrevDevPAddr.uiAddr + uiPageSize)
+ {
+ bPhysicallyContiguous = IMG_FALSE;
+ }
+ sPrevDevPAddr = apsDevPAddr[i];
+ }
+
+ if (bPhysicallyContiguous)
+ {
+ /* We haven't yet managed to create the mapping scenario we
+ * require: unwind and attempt again.
+ */
+ eError1 = PMRUnlockSysPhysAddresses(psPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR"));
+ }
+ eError1 = PMRUnrefPMR(psPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR"));
+ }
+ eError1 = PMRUnrefPMR(psSpacingPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR"));
+ }
+ eError1 = PMRUnrefPMR(psSecondSpacingPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR"));
+ }
+ } else {
+ /* We have the scenario, break out of the attempt loop */
+ break;
+ }
+ }
+ }
+
+ if (uiAttempts == 0)
+ {
+ /* We can't create the scenario, very unlikely this would happen */
+ PVR_LOG_GOTO_IF_ERROR(PVRSRV_ERROR_MEMORY_TEST_FAILED,
+ "Unable to create Non Contiguous PMR scenario",
+ ErrorFreeReadBuffer);
+ }
+
+ /* We have the PMR scenario to test, now attempt to map the whole PMR,
+ * write and then read from it
+ */
+ eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+ goto ErrorUnlockPhysAddresses;
+ }
+
+ OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, ui32NumOfPages * uiPageSize);
+
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+ /*
+ * Release and reacquire the mapping to exercise the mapping paths
+ */
+ eError = PMRAcquireSparseKernelMappingData(psPMR, 0, ui32NumOfPages * uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
+ goto ErrorUnlockPhysAddresses;
+ }
+
+ OSCachedMemSetWMB(pcReadBuffer, 0x0, ui32NumOfPages * uiPageSize);
+ OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, ui32NumOfPages * uiPageSize);
+
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+ for (i = 0; i < ui32NumOfPages * uiPageSize; i++)
+ {
+ if (pcReadBuffer[i] != pcWriteBuffer[i])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Test failed. Got (0x%hhx), expected (0x%hhx)! @ %u",
+ __func__, pcReadBuffer[i], pcWriteBuffer[i], i));
+ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED;
+ goto ErrorUnlockPhysAddresses;
+ }
+ }
+
+ErrorUnlockPhysAddresses:
+ /* Unlock and Unref the PMR to destroy it */
+ eError1 = PMRUnlockSysPhysAddresses(psPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR"));
+ }
+
+ErrorUnrefSecondSpacingPMR:
+ eError1 = PMRUnrefPMR(psSecondSpacingPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Second Spacing PMR"));
+ }
+ErrorUnrefSpacingPMR:
+ eError1 = PMRUnrefPMR(psSpacingPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free Spacing PMR"));
+ }
+ErrorUnrefPMR:
+ eError1 = PMRUnrefPMR(psPMR);
+ if (eError1 != PVRSRV_OK)
+ {
+ eError = (eError == PVRSRV_OK)? eError1 : eError;
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR"));
+ }
+
+ErrorFreeReadBuffer:
+ OSFreeMem(pcReadBuffer);
+ErrorFreeWriteBuffer:
+ OSFreeMem(pcWriteBuffer);
+ErrorFreePMRPageStateMem:
+ OSFreeMem(pbValid);
+ErrorFreePMRPageListMem:
+ OSFreeMem(apsDevPAddr);
+ErrorReturn:
+ return eError;
+}
+
/* Test for PMR factory validation */
static PVRSRV_ERROR
PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags)
PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem");
/* Allocate OS memory for PMR page state */
- pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL));
+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem);
- OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL));
/* Allocate OS memory for write buffer */
pcWriteBuffer = OSAllocMem(uiPageSize);
eError = PhysmemNewRamBackedPMR(NULL,
psDeviceNode,
ui32NumOfPages * uiPageSize,
- uiPageSize,
ui32NumOfPhysPages,
ui32NumOfPages,
pui32MappingTable,
PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR"));
goto ErrorUnlockPhysAddresses;
}
- OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize);
- OSCachedMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize);
+ OSCachedMemSetWMB(pcReadBuffer, 0x0, uiPageSize);
+ OSCachedMemCopyWMB(pcReadBuffer, pvKernAddr, uiMappedSize);
eError = PMRReleaseKernelMappingData(psPMR, hPrivData);
PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
eError = PhysmemNewRamBackedPMR(NULL,
psDeviceNode,
uiPageSize * PHYSMEM_TEST_PAGES,
- uiPageSize * PHYSMEM_TEST_PAGES,
1,
1,
&ui32MappingTable,
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR,
- "%s: PMR validation test failed!",
- __func__));
+ "%s: PMR Contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC,
+ __func__,
+ uiFlags));
return eError;
}
+ eError = PMRContiguousSparseMappingTest(psDeviceNode, uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: PMR Non-contiguous PhysHeap self test failed! %"PVRSRV_MEMALLOCFLAGS_FMTSPEC,
+ __func__,
+ uiFlags));
+ return eError;
+ }
+
+
for (i = 0; i < ui32Passes; i++)
{
/* Mem test */
}
/* GPU local mem */
- eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses);
+ eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(GPU_LOCAL), ui32MemTestPasses);
if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!"));
IMG_CPU_PHYADDR *psCpuPAddr,
IMG_UINT32 uiLog2PageSize,
IMG_BOOL bUseVMInsertPage,
- IMG_BOOL bUseMixedMap)
+ IMG_BOOL bUseMixedMap,
+ int riscv_cached)
{
IMG_INT32 iStatus;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
- sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr + SYSPORT_MEM_OFFSET, 0);
+ sPFN = riscv_cached ? phys_to_pfn_t(psCpuPAddr->uiAddr, 0) :
+ phys_to_pfn_t(psCpuPAddr->uiAddr + SYSPORT_MEM_OFFSET, 0);
#else
uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
IMG_BOOL *pbValid;
IMG_BOOL bUseMixedMap = IMG_FALSE;
IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+ int riscv_cached = 0;
+
+ /* if writeable but not shared mapping is requested then fail */
+ PVR_RETURN_IF_INVALID_PARAM(((ps_vma->vm_flags & VM_WRITE) == 0) ||
+ ((ps_vma->vm_flags & VM_SHARED) != 0));
eError = PMRLockSysPhysAddresses(psPMR);
if (eError != PVRSRV_OK)
goto e0;
}
- if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
- ((ps_vma->vm_flags & VM_SHARED) == 0))
- {
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto e1;
- }
-
sPageProt = vm_get_page_prot(ps_vma->vm_flags);
eError = DevmemCPUCacheMode(psDevNode,
if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
sPageProt = pgprot_writecombine(sPageProt);
#endif
+ riscv_cached = 1;
break;
}
if (pbValid[uiOffsetIdx])
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = riscv_cached ? phys_to_pfn_t(psCpuPAddr->uiAddr, 0) :
+ phys_to_pfn_t(psCpuPAddr->uiAddr + SYSPORT_MEM_OFFSET, 0);
sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
&psCpuPAddr[uiOffsetIdx],
uiLog2PageSize,
bUseVMInsertPage,
- bUseMixedMap);
+ bUseMixedMap, riscv_cached);
if (iStatus)
{
/* Failure error code doesn't get propagated */
(void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
sPAddr,
1<<uiLog2PageSize,
- NULL,
OSGetCurrentClientProcessIDKM()
DEBUG_MEMSTATS_VALUES);
}
*/
static DEFINE_MUTEX(g_sMMapMutex);
-#define _DRIVER_SUSPENDED 1
-#define _DRIVER_NOT_SUSPENDED 0
-static ATOMIC_T g_iDriverSuspended;
+#define _SUSPENDED 1
+#define _NOT_SUSPENDED 0
+static ATOMIC_T g_iDriverSuspendCount;
static ATOMIC_T g_iNumActiveDriverThreads;
static ATOMIC_T g_iNumActiveKernelThreads;
static IMG_HANDLE g_hDriverThreadEventObject;
{
if (pvData == DI_START_TOKEN)
{
+ BridgeGlobalStatsLock();
DIPrintf(psEntry,
"Total ioctl call count = %u\n"
"Total number of bytes copied via copy_from_user = %u\n"
"copy_to_user (B)",
"Total Time (us)",
"Max Time (us)");
+ BridgeGlobalStatsUnlock();
}
else if (pvData != NULL)
{
PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData;
IMG_UINT32 ui32Remainder;
+ BridgeGlobalStatsLock();
DIPrintf(psEntry,
"%3d: %-60s %-48s %-10u %-20u %-20u %-20" IMG_UINT64_FMTSPEC " %-20" IMG_UINT64_FMTSPEC "\n",
(IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
psTableEntry->ui32CopyToUserTotalBytes,
OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+ BridgeGlobalStatsUnlock();
}
return 0;
eError = InitDMABUFBridge();
PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge");
- OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED);
+ OSAtomicWrite(&g_iDriverSuspendCount, 0);
OSAtomicWrite(&g_iNumActiveDriverThreads, 0);
OSAtomicWrite(&g_iNumActiveKernelThreads, 0);
}
}
-PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown)
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv,
+ IMG_BOOL bShutdown)
{
PVRSRV_ERROR eError;
IMG_HANDLE hEvent;
+ IMG_INT iSuspendCount;
eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
if (eError != PVRSRV_OK)
return eError;
}
- if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED,
- _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED)
+ iSuspendCount = OSAtomicIncrement(&g_iDriverSuspendCount);
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Driver suspended %d times.", __func__,
+ iSuspendCount));
+
+ if (OSAtomicCompareExchange(&psDevPriv->suspended, _NOT_SUSPENDED,
+ _SUSPENDED) == _SUSPENDED)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__));
+ OSAtomicDecrement(&g_iDriverSuspendCount);
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p already suspended", __func__,
+ psDevPriv->dev_node));
eError = PVRSRV_ERROR_INVALID_PARAMS;
- goto out_put;
+ goto CloseEventObject;
}
/* now wait for any threads currently in the server to exit */
OSEventObjectWait(hEvent);
}
-out_put:
+CloseEventObject:
OSEventObjectClose(hEvent);
return eError;
}
-PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void)
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv)
{
PVRSRV_ERROR eError;
+ IMG_INT iSuspendCount;
/* resume the driver and then signal so any waiting threads wake up */
- if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED,
- _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED)
+ if (OSAtomicCompareExchange(&psDevPriv->suspended, _SUSPENDED,
+ _NOT_SUSPENDED) == _NOT_SUSPENDED)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device is not suspended", __func__));
return PVRSRV_ERROR_INVALID_PARAMS;
}
+ iSuspendCount = OSAtomicDecrement(&g_iDriverSuspendCount);
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Driver suspended %d times.", __func__,
+ iSuspendCount));
+
eError = OSEventObjectSignal(g_hDriverThreadEventObject);
if (eError != PVRSRV_OK)
{
{
PVRSRV_ERROR eError = PVRSRV_OK;
- if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ if (OSAtomicRead(&g_iDriverSuspendCount) > 0)
{
PVRSRV_ERROR eError = OSEventObjectSignal(g_hDriverThreadEventObject);
if (eError != PVRSRV_OK)
return eError;
}
- while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ /* Wait while any suspend is still outstanding. The loop must spin on a
+ * non-zero count; "== 0" would invert the original "wait while
+ * suspended" semantics (see the comment below: "if ... the number of
+ * suspends is non-0 wait for it").
+ */
+ while (OSAtomicRead(&g_iDriverSuspendCount) != 0)
{
/* we should be able to use normal (not kernel) wait here since
* we were just unfrozen and most likely we're not going to
PVRSRV_ERROR eError;
/* increment first so there is no race between this value and
- * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */
+ * g_iDriverSuspendCount in LinuxBridgeBlockClientsAccess() */
OSAtomicIncrement(&g_iNumActiveDriverThreads);
- if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ if (OSAtomicRead(&g_iDriverSuspendCount) > 0)
{
/* decrement here because the driver is going to be suspended and
* this thread is going to be frozen so we don't want to wait for
* the freezer but during shutdown this will just return */
try_to_freeze();
- /* if the thread was unfrozen but the flag is not yet set to
- * _DRIVER_NOT_SUSPENDED wait for it
+ /* if the thread was unfrozen but the number of suspends is non-0 wait
+ * for it
* in case this is a shutdown the thread was not frozen so we'll
* wait here indefinitely but this is ok (and this is in fact what
* we want) because no thread should be entering the driver in such
error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+e0:
PVRSRVDriverThreadExit();
-e0:
return OSPVRSRVToNativeError(error);
}
IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
PMR *psPMR;
PVRSRV_ERROR eError;
+ PVRSRV_MEMALLOCFLAGS_T uiProtFlags =
+ (BITMASK_HAS(ps_vma->vm_flags, VM_READ) ? PVRSRV_MEMALLOCFLAG_CPU_READABLE : 0) |
+ (BITMASK_HAS(ps_vma->vm_flags, VM_WRITE) ? PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0);
if (psConnection == NULL)
{
* Unref the handle immediately, because we have now done
* the required operation on the PMR (whether it succeeded or not)
*/
- eError = PMRMMapPMR(psPMR, ps_vma);
+ eError = PMRMMapPMR(psPMR, ps_vma, uiProtFlags);
mutex_unlock(&g_sMMapMutex);
PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
if (eError != PVRSRV_OK)
e0:
PVRSRVDriverThreadExit();
- PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+ PVR_DPF((PVR_DBG_ERROR, "Failed with error: %s", PVRSRVGetErrorString(eError)));
PVR_ASSERT(eError != PVRSRV_OK);
- return -ENOENT; // -EAGAIN // or what?
+ return OSPVRSRVToNativeError(eError);
}
#define PVR_BRIDGE_K_H
#include "pvrsrv_error.h"
+#include "pvr_drv.h"
/*!
******************************************************************************
to exit and then disable access to the driver. New threads will
not be allowed to enter the Server until the driver is
unsuspended (see LinuxBridgeUnblockClientsAccess).
+ @Input psDevPriv pointer to devices OS specific data
@Input bShutdown this flag indicates that the function was called
from a shutdown callback and therefore it will
not wait for the kernel threads to get frozen
procedure)
@Return PVRSRV_ERROR
******************************************************************************/
-PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown);
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(struct pvr_drm_private *psDevPriv,
+ IMG_BOOL bShutdown);
/*!
******************************************************************************
@Function LinuxBridgeUnblockClientsAccess
@Description This function will re-enable the bridge and allow any threads
waiting to enter the Server to continue.
+ @Input psDevPriv pointer to devices OS specific data
@Return PVRSRV_ERROR
******************************************************************************/
-PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void);
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(struct pvr_drm_private *psDevPriv);
void LinuxBridgeNumActiveKernelThreadsIncrement(void);
void LinuxBridgeNumActiveKernelThreadsDecrement(void);
mutex_unlock(&ctx->ctx_lock);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+
+/* Count the read-usage and write-usage fences currently attached to @resv.
+ *
+ * Walks the reservation object with the unlocked iterator; when the walk is
+ * restarted (the resv changed underneath us) both counts are reset and
+ * accumulated again. Fences with DMA_RESV_USAGE_KERNEL are visited by the
+ * READ-level iteration but intentionally not counted.
+ */
+static void
+dma_resv_count_fences(struct dma_resv *resv, u32 *read_fence_count_out, u32 *write_fence_count_out)
+{
+ struct dma_resv_iter cursor;
+ u32 write_fence_count = 0;
+ u32 read_fence_count = 0;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ read_fence_count = 0;
+ write_fence_count = 0;
+ }
+ if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_READ)
+ read_fence_count++;
+ else if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
+ write_fence_count++;
+ }
+ /* Pairs with dma_resv_iter_begin(); required by the dma-resv API to
+ * drop any reference the cursor may still hold on the last fence.
+ */
+ dma_resv_iter_end(&cursor);
+
+ *read_fence_count_out = read_fence_count;
+ *write_fence_count_out = write_fence_count;
+}
+
+/* Count how many resv fences the caller will need to track across all PMRs,
+ * using the same include/exclude rules as the check-fence fill pass in
+ * pvr_buffer_sync_check_fences_create().
+ */
static u32
pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
u32 *pmr_flags)
{
struct dma_resv *resv;
- struct dma_resv_iter cursor;
+ u32 fence_count = 0;
+ bool exclusive;
+ int i;
+
+ for (i = 0; i < nr_pmrs; i++) {
+ u32 write_fence_count = 0;
+ u32 read_fence_count = 0;
+
+ /* A write access needs exclusive ownership of the buffer. */
+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+
+ dma_resv_count_fences(resv, &read_fence_count, &write_fence_count);
+
+ /* Write fences count unless this is an exclusive access that
+ * already has read fences to wait on; read fences only count
+ * for exclusive (write) access. Mirrors the conditions in
+ * pvr_buffer_sync_check_fences_create().
+ */
+ if (!exclusive || !read_fence_count)
+ fence_count += write_fence_count;
+ if (exclusive)
+ fence_count += read_fence_count;
+ }
+
+ return fence_count;
+}
+
+/* Build the array of PVR check fences the GPU must wait on before accessing
+ * the given PMRs.
+ *
+ * The fence array is sized by pvr_buffer_sync_pmrs_fence_count() and then
+ * filled from a second, unlocked walk of each PMR's reservation object.
+ * For each resv fence selected, a PVR fence is created; if creation fails
+ * the code falls back to a synchronous dma_fence_wait() on that fence.
+ *
+ * NOTE(review): both passes use the unlocked iterator, so fences added to a
+ * resv between the counting and fill passes (or an iterator restart during
+ * the fill pass) could in principle exceed the allocated array -- confirm
+ * callers serialise against resv updates.
+ *
+ * Returns the allocated check data, or NULL on allocation failure.
+ */
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs,
+ u32 *pmr_flags)
+{
+ struct pvr_buffer_sync_check_data *data;
+ struct dma_resv *resv;
+ struct dma_fence *fence;
+ u32 fence_count;
+ bool exclusive;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+ pmr_flags);
+ if (fence_count) {
+ data->fences = kcalloc(fence_count, sizeof(*data->fences),
+ GFP_KERNEL);
+ if (!data->fences)
+ goto err_check_data_free;
+ }
+
+ for (i = 0; i < nr_pmrs; i++) {
+ struct dma_resv_iter cursor;
+ bool include_write_fences;
+ bool include_read_fences;
+ u32 write_fence_count = 0;
+ u32 read_fence_count = 0;
+
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+
+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+ dma_resv_count_fences(resv, &read_fence_count, &write_fence_count);
+
+ /* Same selection rules as the counting pass. */
+ include_write_fences = (!exclusive || !read_fence_count);
+ include_read_fences = exclusive;
+
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ enum dma_resv_usage usage = dma_resv_iter_usage(&cursor);
+
+ if ((!include_write_fences && usage == DMA_RESV_USAGE_WRITE) ||
+ (!include_read_fences && usage == DMA_RESV_USAGE_READ))
+ continue;
+
+ data->fences[data->nr_fences++] =
+ pvr_fence_create_from_fence(fence_ctx,
+ sync_checkpoint_ctx,
+ fence,
+ PVRSRV_NO_FENCE,
+ (usage == DMA_RESV_USAGE_WRITE) ?
+ "write check fence" :
+ "read check fence");
+ if (!data->fences[data->nr_fences - 1]) {
+ data->nr_fences--;
+ PVR_FENCE_TRACE(fence,
+ (usage == DMA_RESV_USAGE_WRITE) ?
+ "waiting on write fence" :
+ "waiting on read fence\n");
+ WARN_ON(dma_fence_wait(fence, true) <= 0);
+ }
+ }
+ /* Pairs with dma_resv_iter_begin(); required by the dma-resv
+ * API to drop any reference still held by the cursor.
+ */
+ dma_resv_iter_end(&cursor);
+ }
+
+ WARN_ON((i != nr_pmrs));
+
+ return data;
+
+err_check_data_free:
+ kfree(data);
+ return NULL;
+}
+
+#else
+
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+ u32 *pmr_flags)
+{
+ struct dma_resv *resv;
+ struct dma_resv_list *resv_list;
struct dma_fence *fence;
u32 fence_count = 0;
bool exclusive;
if (WARN_ON_ONCE(!resv))
continue;
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ resv_list = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
+
+ if (fence &&
+ (!exclusive || !resv_list || !resv_list->shared_count))
fence_count++;
- }
+
+ if (exclusive && resv_list)
+ fence_count += resv_list->shared_count;
}
return fence_count;
{
struct pvr_buffer_sync_check_data *data;
struct dma_resv *resv;
- struct dma_resv_iter cursor;
+ struct dma_resv_list *resv_list;
struct dma_fence *fence;
u32 fence_count;
bool exclusive;
- int i;
+ int i, j;
int err;
data = kzalloc(sizeof(*data), GFP_KERNEL);
continue;
exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
-
- if (!resv->fences) {
- err = dma_resv_reserve_fences(resv, 1);
+ if (!exclusive) {
+ err = dma_resv_reserve_shared(resv
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
+ , 1
+#endif
+ );
if (err)
goto err_destroy_fences;
}
- dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ resv_list = dma_resv_shared_list(resv);
+ fence = dma_resv_excl_fence(resv);
+
+ if (fence &&
+ (!exclusive || !resv_list || !resv_list->shared_count)) {
data->fences[data->nr_fences++] =
pvr_fence_create_from_fence(fence_ctx,
sync_checkpoint_ctx,
WARN_ON(dma_fence_wait(fence, true) <= 0);
}
}
+
+ if (exclusive && resv_list) {
+ for (j = 0; j < resv_list->shared_count; j++) {
+ fence = rcu_dereference_protected(resv_list->shared[j],
+ dma_resv_held(resv));
+ data->fences[data->nr_fences++] =
+ pvr_fence_create_from_fence(fence_ctx,
+ sync_checkpoint_ctx,
+ fence,
+ PVRSRV_NO_FENCE,
+ "check fence");
+ if (!data->fences[data->nr_fences - 1]) {
+ data->nr_fences--;
+ PVR_FENCE_TRACE(fence,
+ "waiting on non-exclusive fence\n");
+ WARN_ON(dma_fence_wait(fence, true) <= 0);
+ }
+ }
+ }
}
WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count));
return NULL;
}
+#endif
+
static void
pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
{
if (WARN_ON_ONCE(!resv))
continue;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
+ dma_resv_reserve_fences(resv, 1);
+#endif
if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
PVR_FENCE_TRACE(&data->update_fence->base,
"added exclusive fence (%s) to resv %p\n",
data->update_fence->name, resv);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
dma_resv_add_fence(resv,
- &data->update_fence->base, DMA_RESV_USAGE_WRITE);
+ &data->update_fence->base,
+ DMA_RESV_USAGE_WRITE);
+#else
+ dma_resv_add_excl_fence(resv,
+ &data->update_fence->base);
+#endif
} else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
PVR_FENCE_TRACE(&data->update_fence->base,
"added non-exclusive fence (%s) to resv %p\n",
data->update_fence->name, resv);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
dma_resv_add_fence(resv,
- &data->update_fence->base, DMA_RESV_USAGE_READ);
+ &data->update_fence->base,
+ DMA_RESV_USAGE_READ);
+#else
+ dma_resv_add_shared_fence(resv,
+ &data->update_fence->base);
+#endif
}
}
#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
-static IMG_UINT32 gPVRDebugLevel =
+static IMG_UINT32 PVRDebugLevel =
(
DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
#if defined(PVRSRV_DEBUG_CCB_MAX)
#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
);
-module_param(gPVRDebugLevel, uint, 0644);
-MODULE_PARM_DESC(gPVRDebugLevel,
+module_param(PVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(PVRDebugLevel,
"Sets the level of debug output (default 0x7)");
IMG_UINT32 OSDebugLevel(void)
{
- return gPVRDebugLevel;
+ return PVRDebugLevel;
}
void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel)
{
- gPVRDebugLevel = ui32DebugLevel;
+ PVRDebugLevel = ui32DebugLevel;
}
IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel)
{
- return (gPVRDebugLevel & ui32DebugLevel) != 0;
+ return (PVRDebugLevel & ui32DebugLevel) != 0;
}
#else /* defined(PVRSRV_NEED_PVR_DPF) */
IMG_CHAR *pszBuf = gszBuffer;
IMG_UINT32 ui32BufSiz = sizeof(gszBuffer);
- if (!(gPVRDebugLevel & ui32DebugLevel))
+ if (!(PVRDebugLevel & ui32DebugLevel))
{
return;
}
*/
static DEFINE_MUTEX(g_device_mutex);
+/* Executed before sleep/suspend-to-RAM/S3. During this phase the content
+ * of the video memory is preserved (copied to system RAM). This step is
+ * necessary because the device can be powered off and the content of the
+ * video memory lost.
+ */
static int pvr_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct pvr_drm_private *priv = ddev->dev_private;
DRM_DEBUG_DRIVER("device %p\n", dev);
- return PVRSRVDeviceSuspend(priv->dev_node);
+ return PVRSRVDeviceSuspend(ddev);
}
+/* Executed after the system is woken up from sleep/suspend-to-RAM/S3. This
+ * phase restores the content of the video memory from the system RAM.
+ */
static int pvr_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
- struct pvr_drm_private *priv = ddev->dev_private;
DRM_DEBUG_DRIVER("device %p\n", dev);
- return PVRSRVDeviceResume(priv->dev_node);
+ return PVRSRVDeviceResume(ddev);
}
+
static int pvr_pm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
return 0;
}
+
+/* Executed before the hibernation image is created. This callback allows the
+ * content of the video RAM to be preserved in the system RAM, which in turn
+ * is then stored to disk.
+ */
+static int pvr_pm_freeze(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev);
+
+ /* Hibernation freeze reuses the same suspend path as S3 suspend. */
+ return PVRSRVDeviceSuspend(ddev);
+}
+
+/* Executed after the hibernation image is created or if the creation of the
+ * image has failed. This callback should undo whatever was done in
+ * pvr_pm_freeze to allow the device to operate in the same way as before the
+ * call to pvr_pm_freeze.
+ */
+static int pvr_pm_thaw(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev);
+
+ /* Undo pvr_pm_freeze() via the common resume path. */
+ return PVRSRVDeviceResume(ddev);
+}
+
+/* Executed after the hibernation image is created. This callback should not
+ * preserve the content of the video memory since this was already done
+ * in pvr_pm_freeze.
+ *
+ * Note: from the tests performed on a TestChip this callback is not executed
+ * and driver's pvr_shutdown() is executed instead.
+ */
+static int pvr_pm_poweroff(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev);
+
+ PVRSRVDeviceShutdown(ddev);
+
+ /* PVRSRVDeviceShutdown() reports no status, so always return success. */
+ return 0;
+}
+
+/* Executed after the content of the system memory is restored from the
+ * hibernation image. This callback restores video RAM from the system RAM
+ * and performs any necessary device setup required for the device to operate
+ * properly.
+ */
+static int pvr_pm_restore(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ DRM_DEBUG_DRIVER("%s(): device %p\n", __func__, dev);
+
+ /* Post-hibernation restore reuses the common resume path. */
+ return PVRSRVDeviceResume(ddev);
+}
+
const struct dev_pm_ops pvr_pm_ops = {
+ /* Sleep (suspend-to-RAM/S3) callbacks.
+ * This mode saves the content of the video RAM to the system RAM and
+ * powers off the device to reduce the power consumption. Because the
+ * video RAM can be powered off, it needs to be preserved beforehand.
+ */
.suspend = pvr_pm_suspend,
.resume = pvr_pm_resume,
- .runtime_suspend = pvr_pm_runtime_suspend,
- .runtime_resume = pvr_pm_runtime_resume,
+ .runtime_suspend = pvr_pm_runtime_suspend,
+ .runtime_resume = pvr_pm_runtime_resume,
+
+
+ /* Hibernation (suspend-to-disk/S4) callbacks.
+ * This mode saves the content of the video RAM to the system RAM and then
+ * dumps the system RAM to disk (swap partition or swap file). The system
+ * then powers off. After power on the system RAM content is loaded from
+ * the disk and then video RAM is restored from the system RAM.
+ *
+ * The procedure is executed in following order
+ *
+ * - Suspend-to-disk is triggered
+ * At this point the OS goes through the list of all registered devices and
+ * calls provided callbacks.
+ * -- pvr_pm_freeze() is called
+ * The GPU is powered off and submitting new work is blocked.
+ * The content of the video RAM is saved to the system RAM, and
+ * other actions required to suspend the device are performed.
+ * -- system RAM image is created and saved on the disk
+ * The disk now contains a snapshot of the DDK driver taken at the
+ * moment pvr_pm_freeze() was called.
+ * -- pvr_pm_thaw() is called
+ * All actions taken in pvr_pm_freeze() are undone. The memory
+ * allocated for the video RAM is freed and all actions necessary
+ * to bring the device to operational state are taken.
+ * This makes sure that regardless if image was created successfully
+ * or not the device remains operational.
+ *
+ * - System is powered off
+ * -- pvr_shutdown() is called
+ * No actions are required beside powering off the GPU.
+ *
+ * - System is powered up
+ * -- system RAM image is read from the disk
+ * This restores the snapshot of the DDK driver along with the saved
+ * video RAM buffer.
+ * -- pvr_pm_restore() is called
+ * Video RAM is restored from the buffer located in the system RAM.
+ * Actions to reset the device and bring it back to working state
+ * are taken. Video RAM buffer is freed.
+ * In summary the same procedure as in the case of pvr_pm_thaw() is
+ * performed.
+ */
+ .freeze = pvr_pm_freeze,
+ .thaw = pvr_pm_thaw,
+ .poweroff = pvr_pm_poweroff,
+ .restore = pvr_pm_restore,
};
*/
void *sync_foreign_debug_notify_handle;
#endif
+
+ /* Flag stating whether the device is currently suspended: 0 means the
+ * device is running (it was resumed, or suspend was never called); 1
+ * means the OS has called suspend on this device.
+ */
+ atomic_t suspended;
};
extern const struct dev_pm_ops pvr_pm_ops;
static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags)
{
int deviceId = _device_get_devid(dev);
- PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId);
RGX_DATA *psRGXData = NULL;
IMG_DVFS_DEVICE *psDVFSDevice = NULL;
IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL;
/* Increasing frequency, change voltage first */
if (ui32Freq > ui32CurFreq)
{
- psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, ui32Volt);
}
- psDVFSDeviceCfg->pfnSetFrequency(ui32Freq);
+ psDVFSDeviceCfg->pfnSetFrequency(psDeviceNode->psDevConfig->hSysData, ui32Freq);
/* Decreasing frequency, change frequency first */
if (ui32Freq < ui32CurFreq)
{
- psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, ui32Volt);
}
psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq;
static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat)
{
int deviceId = _device_get_devid(dev);
- PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId);
PVRSRV_RGXDEV_INFO *psDevInfo = NULL;
IMG_DVFS_DEVICE *psDVFSDevice = NULL;
RGX_DATA *psRGXData = NULL;
static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq)
{
int deviceId = _device_get_devid(dev);
- PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByOSId(deviceId);
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetDeviceInstanceByKernelDevID(deviceId);
RGX_DATA *psRGXData = NULL;
/* Check the device is registered */
psRGXTimingInfo->ui32CoreClockSpeed = min_freq;
- psDVFSDeviceCfg->pfnSetFrequency(min_freq);
- psDVFSDeviceCfg->pfnSetVoltage(min_volt);
+ psDVFSDeviceCfg->pfnSetFrequency(psDeviceNode->psDevConfig->hSysData, min_freq);
+ psDVFSDeviceCfg->pfnSetVoltage(psDeviceNode->psDevConfig->hSysData, min_volt);
psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold;
psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential;
eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq));
goto err_exit;
}
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 50)))
+ /* Handle Linux kernel bug where a NULL return can occur. */
+ if (psDVFSDevice->psDevFreq == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Failed to add as devfreq device %p, NULL return",
+ psDVFSDevice->psDevFreq));
+ eError = TO_IMG_ERR(-EINVAL);
+ goto err_exit;
+ }
+#endif
eError = SuspendDVFS(psDeviceNode);
if (eError != PVRSRV_OK)
}
PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
- " | @%s (foreign)", value);
+ " | @%s (foreign)", fence_value_str);
}
spin_unlock_irqrestore(&fctx->list_lock, flags);
}
{
struct pvr_fence_context *fctx =
container_of(kref, struct pvr_fence_context, kref);
- struct workqueue_struct *unordered_wq = NativeSyncGetFenceUnorderedWq();
PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
trace_pvr_fence_context_destroy_kref(fctx);
- if (unordered_wq)
- queue_work(unordered_wq, &fctx->destroy_work);
+ schedule_work(&fctx->destroy_work);
}
/**
* pvr_sync_file.c if the driver determines any GPU work
* is stuck waiting for a sync checkpoint representing a
* foreign sync to be signalled.
+ * @fctx: fence context
* @nr_ufos: number of ufos in vaddrs
* @vaddrs: array of FW addresses of UFOs which the
* driver is waiting on.
static inline void pvr_fence_cleanup(void)
{
- struct workqueue_struct *unordered_wq = NativeSyncGetFenceUnorderedWq();
-
/*
* Ensure all PVR fence contexts have been destroyed, by flushing
* the global workqueue.
*/
- if (unordered_wq)
- flush_workqueue(unordered_wq);
+ flush_scheduled_work();
}
#if defined(PVR_FENCE_DEBUG)
#define CREATE_TRACE_POINTS
#include "rogue_trace_events.h"
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+#include "pvr_gpuwork.h"
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
/******************************************************************************
Module internal implementation
******************************************************************************/
IMG_HANDLE hGPUTraceTLStream;
IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp;
IMG_UINT32 ui32FTraceLastOrdinal;
+ /* Used to determine if HWPerf was enabled outside of this module. */
+ IMG_BOOL bHWPerfHasRun;
} RGX_HWPERF_FTRACE_DATA;
/* This lock ensures state change of GPU_TRACING on/off is done atomically */
IMG_BOOL bDeInit);
static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer,
+ IMG_UINT32 ui32ReadLen);
PVRSRV_ERROR PVRGpuTraceSupportInit(void)
{
eError = OSLockCreate(&ghGPUTraceStateLock);
PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate");
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ eError = GpuTraceWorkPeriodInitialize();
+ PVR_LOG_RETURN_IF_ERROR (eError, "GpuTraceWorkPeriodInitialize");
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
return PVRSRV_OK;
}
void PVRGpuTraceSupportDeInit(void)
{
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ GpuTraceSupportDeInitialize();
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
if (ghGPUTraceStateLock)
{
OSLockDestroy(ghGPUTraceStateLock);
/* single events can be enabled by calling trace_set_clr_event()
* with the event name, e.g.:
* trace_set_clr_event("rogue", "rogue_ufo_update", 1) */
-#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */
-#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL)
- if (trace_set_clr_event("gpu", NULL, 1))
- {
- PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event"
- " group"));
- }
- else
- {
- PVR_LOG(("FTrace events from \"gpu\" group enabled"));
- }
-#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */
if (trace_set_clr_event("rogue", NULL, 1))
{
PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event"
- " group"));
+ " group"));
+ /* this enables FTrace globally (if not enabled nothing will appear
+ * in the FTrace buffer) */
+ tracing_on();
}
else
{
PVR_LOG(("FTrace events from \"rogue\" group enabled"));
}
-#endif /* defined(CONFIG_EVENT_TRACING) */
}
}
psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+ psFtraceData->bHWPerfHasRun = psRgxDevInfo->bHWPerfHasRun;
+
PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
/* return if already enabled */
IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter &
(RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO);
- /* Do not call into PVRSRVRGXCtrlHWPerfKM if we're in GUEST mode. */
- if (PVRSRV_VZ_MODE_IS(GUEST))
- {
- eError = PVRSRV_OK;
- }
- else
- {
- eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
- RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
- RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
- ui64UFOFilter);
- }
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+ ui64UFOFilter);
PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+
+ /* This can be cleared now that we saved the previous state. It will
+ * allow us to detect the next time HWPerf is enabled. */
+ psRgxDevInfo->bHWPerfHasRun = IMG_FALSE;
}
else
#endif
/* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */
if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
- PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32OsDeviceID) < 0)
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32KernelDeviceID) < 0)
{
PVR_DPF((PVR_DBG_ERROR,
"%s: Failed to form HWPerf stream name for device %d",
__func__,
- psRgxDevNode->sDevId.i32OsDeviceID));
+ psRgxDevNode->sDevId.i32KernelDeviceID));
return PVRSRV_ERROR_INVALID_PARAMS;
}
#if defined(SUPPORT_RGX)
if (!bDeInit)
{
- /* Do not call into PVRSRVRGXCtrlHWPerfKM if we are in GUEST mode. */
- if (PVRSRV_VZ_MODE_IS(GUEST))
- {
- eError = PVRSRV_OK;
- }
- else
- {
- eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
- RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
- (RGX_HWPERF_EVENT_MASK_NONE));
- }
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+ (RGX_HWPERF_EVENT_MASK_NONE));
PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
}
#endif
if (psFtraceData->hGPUTraceTLStream)
{
- IMG_PBYTE pbTmp = NULL;
- IMG_UINT32 ui32Tmp = 0;
+ IMG_PBYTE pBuffer;
+ IMG_UINT32 ui32ReadLen;
/* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
* are some events left unprocessed in this FTrace/systrace "session"
*/
eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
psFtraceData->hGPUTraceTLStream,
- &pbTmp, &ui32Tmp);
+ &pBuffer, &ui32ReadLen);
PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
- /* Let close stream perform the release data on the outstanding acquired data */
+ /* We still need to process packets if there were any so that there is
+ * no gap in the ordinal value.
+ */
+ if (ui32ReadLen > 0)
+ {
+ _GpuTraceProcessPackets(psRgxDevInfo, pBuffer, ui32ReadLen);
+ }
+
+ /* Let close stream perform the release data on the outstanding acquired
+ * data */
eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
psFtraceData->hGPUTraceTLStream);
PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
{
- static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+ static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE2_LAST+1] = {
+ "TA3D", /* Deprecated */
#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
- "TA3D", "CDM", "RS", "SHG", "TQTDM", "SYNC", "TA", "3D", "LAST"
+ /* Volcanic deprecated kick types */
+ "CDM", "RS", "SHG", "TQTDM", "SYNC", "TA", "3D", "LAST",
+
+ "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>",
+ "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>",
#else
- "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "TA", "3D", "LAST"
+ /* Rogue deprecated kick types */
+ "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "TA", "3D", "LAST",
+
+ "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>", "<UNKNOWN>",
+ "<UNKNOWN>",
#endif
+ "TQ2D", "TQ3D", "TQTDM", "CDM", "GEOM", "3D", "SYNC", "RS", "LAST"
};
/* cast in case of negative value */
- if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+ if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE2_LAST)
{
return "<UNKNOWN>";
}
if (PVRGpuTraceIsEnabled())
{
- trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef,
- pszKickType);
+ trace_rogue_job_enqueue(psDevNode->sDevId.ui32InternalID, ui32FirmwareCtx,
+ ui32IntJobRef, ui32ExtJobRef, pszKickType);
}
}
static void _GpuTraceWorkSwitch(
IMG_UINT64 ui64HWTimestampInOSTime,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32CtxId,
IMG_UINT32 ui32CtxPriority,
IMG_UINT32 ui32ExtJobRef,
{
PVR_ASSERT(pszWorkType);
trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime,
- ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef);
+ ui32GpuId, ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef,
+ ui32ExtJobRef);
}
static void _GpuTraceUfo(
IMG_UINT64 ui64OSTimestamp,
const RGX_HWPERF_UFO_EV eEvType,
+ const IMG_UINT32 ui32GpuId,
const IMG_UINT32 ui32CtxId,
const IMG_UINT32 ui32ExtJobRef,
const IMG_UINT32 ui32IntJobRef,
{
switch (eEvType) {
case RGX_HWPERF_UFO_EV_UPDATE:
- trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+ trace_rogue_ufo_updates(ui64OSTimestamp, ui32GpuId, ui32CtxId,
ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData);
break;
case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
- trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32GpuId, ui32CtxId,
ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
puData);
break;
case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
- trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32GpuId, ui32CtxId,
ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
puData);
break;
case RGX_HWPERF_UFO_EV_CHECK_FAIL:
- trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32GpuId, ui32CtxId,
ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount,
puData);
break;
case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
- trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32GpuId, ui32CtxId,
ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount,
puData);
break;
static void _GpuTraceFirmware(
IMG_UINT64 ui64HWTimestampInOSTime,
+ IMG_UINT32 ui32GpuId,
const IMG_CHAR* pszWorkType,
PVR_GPUTRACE_SWITCH_TYPE eSwType)
{
- trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+ trace_rogue_firmware_activity(ui64HWTimestampInOSTime, ui32GpuId, pszWorkType, eSwType);
}
static void _GpuTraceEventsLost(
const RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT32 ui32GpuId,
const IMG_UINT32 ui32LastOrdinal,
const IMG_UINT32 ui32CurrOrdinal)
{
- trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+ trace_rogue_events_lost(eStreamId, ui32GpuId, ui32LastOrdinal, ui32CurrOrdinal);
}
/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
_GpuTraceWorkSwitch(ui64Timestamp,
+ psDevInfo->psDeviceNode->sDevId.ui32InternalID,
psHWPerfPktData->ui32DMContext,
psHWPerfPktData->ui32CtxPriority,
psHWPerfPktData->ui32ExtJobRef,
pszWorkName,
eSwType);
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+ GpuTraceWorkPeriod(psHWPerfPktData->ui32PID,
+ psDevInfo->psDeviceNode->sDevId.ui32InternalID,
+ ui64Timestamp,
+ psHWPerfPktData->ui32IntJobRef,
+ (eSwType == PVR_GPUTRACE_SWITCH_TYPE_BEGIN) ?
+ PVR_GPU_WORK_EVENT_START : PVR_GPU_WORK_EVENT_END);
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
PVR_DPF_RETURN;
}
psHWPerfPktData->ui32IntJobRef));
_GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+ psDevInfo->psDeviceNode->sDevId.ui32InternalID,
psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef,
psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
}
ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
psHWPerfPkt->ui64Timestamp);
- _GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+ _GpuTraceFirmware(ui64Timestamp, psDevInfo->psDeviceNode->sDevId.ui32InternalID, pszWorkName,
+ eSwType);
}
static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
PVR_ASSERT(psHWPerfPkt);
eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
- if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+ /* If HWPerf was enabled outside of this module the local copy of the ordinal
+ * is going to be out of sync so don't check for it. */
+ if (!psFtraceData->bHWPerfHasRun)
{
- RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
- _GpuTraceEventsLost(eStreamId,
- psFtraceData->ui32FTraceLastOrdinal,
- psHWPerfPkt->ui32Ordinal);
- PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
- eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+ if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+ {
+ RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+ _GpuTraceEventsLost(eStreamId, psDevInfo->psDeviceNode->sDevId.ui32InternalID,
+ psFtraceData->ui32FTraceLastOrdinal,
+ psHWPerfPkt->ui32Ordinal);
+ PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+ eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+ }
+ }
+ else
+ {
+ /* Clear since the next check for the ordinal consistency should be
+ * correct. */
+ psFtraceData->bHWPerfHasRun = IMG_FALSE;
}
psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
}
-static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
- void *pBuffer, IMG_UINT32 ui32ReadLen)
+static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, void *pBuffer,
+ IMG_UINT32 ui32ReadLen)
{
IMG_UINT32 ui32TlPackets = 0;
IMG_UINT32 ui32HWPerfPackets = 0;
static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
{
- PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
- RGX_HWPERF_FTRACE_DATA* psFtraceData;
- PVRSRV_ERROR eError;
- IMG_PBYTE pBuffer;
- IMG_UINT32 ui32ReadLen;
- IMG_BOOL bFTraceLockAcquired = IMG_FALSE;
+ PVRSRV_RGXDEV_INFO *psDeviceInfo = hCmdCompHandle;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+ PVRSRV_ERROR eError;
+ IMG_PBYTE pBuffer;
+ IMG_UINT32 ui32ReadLen;
PVR_DPF_ENTERED;
psFtraceData = psDeviceInfo->pvGpuFtraceData;
- /* Command-complete notifiers can run concurrently. If this is
- * happening, just bail out and let the previous call finish.
+ /* Command-complete notifiers can run concurrently. If this is happening,
+ * just bail out and let the previous call finish.
* This is ok because we can process the queued packets on the next call.
*/
- bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock);
- if (IMG_FALSE == bFTraceLockAcquired)
+ if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock))
{
PVR_DPF_RETURN;
}
- /* If this notifier is called, it means the TL resources will be valid at-least
- * until the end of this call, since the DeInit function will wait on the hFTraceResourceLock
- * to clean-up the TL resources and un-register the notifier, so just assert here.
+ /* If this notifier is called, it means the TL resources will be valid
+ * at least until the end of this call, since the DeInit function will wait
+ * on the hFTraceResourceLock to clean-up the TL resources and un-register
+ * the notifier, so just assert here.
*/
- PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+ PVR_ASSERT(psFtraceData->hGPUTraceTLStream != NULL);
/* If we have a valid stream attempt to acquire some data */
- eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
- if (eError == PVRSRV_OK)
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream,
+ &pBuffer, &ui32ReadLen);
+ if (eError != PVRSRV_OK)
{
- /* Process the HWPerf packets and release the data */
- if (ui32ReadLen > 0)
+ if (eError != PVRSRV_ERROR_TIMEOUT)
{
- PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+ PVR_LOG_ERROR(eError, "TLClientAcquireData");
+ }
- /* Process the transport layer data for HWPerf packets... */
- _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+ goto unlock;
+ }
- eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
- if (eError != PVRSRV_OK)
- {
- PVR_LOG_ERROR(eError, "TLClientReleaseData");
+ /* Process the HWPerf packets and release the data */
+ if (ui32ReadLen > 0)
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: DATA AVAILABLE offset=%p, length=%d",
+ __func__, pBuffer, ui32ReadLen));
- /* Serious error, disable FTrace GPU events */
+ /* Process the transport layer data for HWPerf packets... */
+ _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
- /* Release TraceLock so we always have the locking
- * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/
- OSLockRelease(psFtraceData->hFTraceResourceLock);
- OSLockAcquire(psFtraceData->hFTraceResourceLock);
- _GpuTraceDisable(psDeviceInfo, IMG_FALSE);
- OSLockRelease(psFtraceData->hFTraceResourceLock);
- goto out;
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "TLClientReleaseData");
- }
- } /* else no data, ignore */
- }
- else if (eError != PVRSRV_ERROR_TIMEOUT)
- {
- PVR_LOG_ERROR(eError, "TLClientAcquireData");
- }
- if (bFTraceLockAcquired)
- {
- OSLockRelease(psFtraceData->hFTraceResourceLock);
+ /* Serious error, disable FTrace GPU events */
+ _GpuTraceDisable(psDeviceInfo, IMG_FALSE);
+ }
}
-out:
+
+unlock:
+ OSLockRelease(psFtraceData->hFTraceResourceLock);
+
PVR_DPF_RETURN;
}
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+PVRSRV_ERROR
+PVRSRVGpuTraceWorkPeriodEventStatsRegister(IMG_HANDLE
+ *phGpuWorkPeriodEventStats)
+{
+ return GpuTraceWorkPeriodEventStatsRegister(phGpuWorkPeriodEventStats);
+}
+
+void
+PVRSRVGpuTraceWorkPeriodEventStatsUnregister(
+ IMG_HANDLE hGpuWorkPeriodEventStats)
+{
+ GpuTraceWorkPeriodEventStatsUnregister(hGpuWorkPeriodEventStats);
+}
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
/* ----- AppHint interface -------------------------------------------------- */
static PVRSRV_ERROR _GpuTraceIsEnabledCallback(
if (value != gbFTraceGPUEventsEnabled)
{
- PVRSRV_ERROR eError;
- if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK)
+#if defined(PVRSRV_NEED_PVR_TRACE)
+ const IMG_CHAR *pszOperation = value ? "enable" : "disable";
+#endif
+
+ if (_GpuTraceSetEnabledForAllDevices(value) != PVRSRV_OK)
{
- PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED"));
- gbFTraceGPUEventsEnabled = value;
+ PVR_TRACE(("FAILED to %s GPU FTrace for all devices", pszOperation));
+ goto err_restore_state;
}
- else
+
+ if (trace_set_clr_event("rogue", NULL, (int) value) != 0)
{
- PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable"));
- /* On failure, partial enable/disable might have resulted.
- * Try best to restore to previous state. Ignore error */
- _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled);
+ PVR_TRACE(("FAILED to %s GPU FTrace event group", pszOperation));
+ goto err_restore_state;
+ }
- OSLockRelease(ghGPUTraceStateLock);
- return eError;
+ if (value)
+ {
+ /* this enables FTrace globally (if not enabled nothing will appear
+ * in the FTrace buffer) */
+ tracing_on();
}
+
+ PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED"));
+ gbFTraceGPUEventsEnabled = value;
}
else
{
OSLockRelease(ghGPUTraceStateLock);
return PVRSRV_OK;
+
+err_restore_state:
+ /* On failure, partial enable/disable might have resulted. Try best to
+ * restore to previous state. Ignore error */
+ (void) _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled);
+
+ (void) trace_set_clr_event("rogue", NULL, (int) gbFTraceGPUEventsEnabled);
+
+ OSLockRelease(ghGPUTraceStateLock);
+
+ return PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT;
}
void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
{
+ /* Do not register callback handlers if we are in GUEST mode */
+ if (PVRSRV_VZ_MODE_IS(GUEST))
+ {
+ return;
+ }
PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU,
_GpuTraceIsEnabledCallback,
_GpuTraceSetEnabledCallback,
}
else if (eError != PVRSRV_OK)
{
- PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32OsDeviceID));
+ PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32KernelDeviceID));
}
#endif
psDeviceNode = psDeviceNode->psNext;
if (ghLockFTraceEventLock == NULL)
return;
- OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
- psDeviceNode = psPVRSRVData->psDeviceNodeList;
-
/* Lock down events state, for consistent value of guiUfoEventRef */
OSLockAcquire(ghLockFTraceEventLock);
if (--guiUfoEventRef == 0)
{
+ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+
/* make sure UFO events are disabled on all rogue devices */
while (psDeviceNode)
{
else if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d",
- psDeviceNode->sDevId.i32OsDeviceID));
+ psDeviceNode->sDevId.i32KernelDeviceID));
}
#endif
psDeviceNode = psDeviceNode->psNext;
}
+
+ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
}
OSLockRelease(ghLockFTraceEventLock);
-
- OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
}
void PVRGpuTraceEnableFirmwareActivityCallback(void)
{
+#if defined(SUPPORT_RGX)
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_DEVICE_NODE *psDeviceNode;
-#if defined(SUPPORT_RGX)
PVRSRV_RGXDEV_INFO *psRgxDevInfo;
uint64_t ui64Filter, ui64FWEventsFilter = 0;
int i;
{
ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
}
-#endif
+
+ OSLockAcquire(ghLockFTraceEventLock);
OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
psDeviceNode = psPVRSRVData->psDeviceNodeList;
-
- OSLockAcquire(ghLockFTraceEventLock);
/* Enable all FW events on all the devices */
while (psDeviceNode)
{
-#if defined(SUPPORT_RGX)
PVRSRV_ERROR eError;
psRgxDevInfo = psDeviceNode->pvDevice;
ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter;
eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
IMG_FALSE, ui64Filter);
- if ((eError != PVRSRV_OK) && !PVRSRV_VZ_MODE_IS(GUEST))
+ if (eError != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware"
" task timings (%s).", PVRSRVGetErrorString(eError)));
}
-#endif
+
psDeviceNode = psDeviceNode->psNext;
}
- OSLockRelease(ghLockFTraceEventLock);
OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+ OSLockRelease(ghLockFTraceEventLock);
+#endif /* defined(SUPPORT_RGX) */
}
void PVRGpuTraceDisableFirmwareActivityCallback(void)
{
+#if defined(SUPPORT_RGX)
PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
PVRSRV_DEVICE_NODE *psDeviceNode;
-#if defined(SUPPORT_RGX)
IMG_UINT64 ui64FWEventsFilter = ~0;
int i;
-#endif
/* We have to check if lock is valid because on driver unload
* PVRGpuTraceSupportDeInit is called before kernel disables the ftrace
if (ghLockFTraceEventLock == NULL)
return;
+ OSLockAcquire(ghLockFTraceEventLock);
+
OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
psDeviceNode = psPVRSRVData->psDeviceNodeList;
-#if defined(SUPPORT_RGX)
for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
{
ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
}
-#endif
-
- OSLockAcquire(ghLockFTraceEventLock);
/* Disable all FW events on all the devices */
while (psDeviceNode)
{
-#if defined(SUPPORT_RGX)
PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter;
- if ((PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
- IMG_FALSE, ui64Filter) != PVRSRV_OK) &&
- !PVRSRV_VZ_MODE_IS(GUEST))
+ if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter) != PVRSRV_OK)
{
PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
}
-#endif
psDeviceNode = psDeviceNode->psNext;
}
- OSLockRelease(ghLockFTraceEventLock);
-
OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
+
+ OSLockRelease(ghLockFTraceEventLock);
+#endif /* defined(SUPPORT_RGX) */
}
/******************************************************************************
/* This header must always be included last */
#include "kernel_compatibility.h"
+MODULE_IMPORT_NS(DMA_BUF);
+
static struct drm_driver pvr_drm_platform_driver;
#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
static void pvr_shutdown(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
- struct pvr_drm_private *priv = ddev->dev_private;
DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
- PVRSRVDeviceShutdown(priv->dev_node);
+ PVRSRVDeviceShutdown(ddev);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
#endif
- printk("@@#########################@@\n");
err = PVRSRVDriverInit();
if (err)
return err;
if (err)
return err;
- return 0;//pvr_devices_register();
+ return 0; // pvr_devices_register();
}
static void __exit pvr_exit(void)
DRM_DEBUG_DRIVER("done\n");
}
-module_init(pvr_init);
-//late_initcall(pvr_init);
+late_initcall(pvr_init);
module_exit(pvr_exit);
#include "pvr_bridge_k.h"
#include "pvr_uaccess.h"
#include "osdi_impl.h"
+#include "kernel_compatibility.h"
#define _DRIVER_THREAD_ENTER() \
do { \
static int _Open(struct inode *psINode, struct file *psFile)
{
- DFS_FILE *psDFSFile = PDE_DATA(psINode);
+ DFS_FILE *psDFSFile = pde_data(psINode);
int iRes;
PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", -EIO);
static int _Close(struct inode *psINode, struct file *psFile)
{
- DFS_FILE *psDFSFile = PDE_DATA(psINode);
+ DFS_FILE *psDFSFile = pde_data(psINode);
DFS_ENTRY *psEntry;
int iRes;
static ssize_t _Read(struct file *psFile, char __user *pcBuffer,
size_t uiCount, loff_t *puiPos)
{
- DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode);
+ DFS_FILE *psDFSFile = pde_data(psFile->f_path.dentry->d_inode);
ssize_t iRes = -1;
_DRIVER_THREAD_ENTER();
iRes = seq_read(psFile, pcBuffer, uiCount, puiPos);
if (iRes < 0)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: filed to read from file pfnRead() "
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file pfnRead() "
"returned %zd", __func__, iRes));
goto return_;
}
psEntry->sImplEntry.pvPrivData);
if (iRes < 0)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: filed to read from file pfnRead() "
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file pfnRead() "
"returned %zd", __func__, iRes));
OSFreeMem(pcLocalBuffer);
goto return_;
static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin)
{
- DFS_FILE *psDFSFile = PDE_DATA(psFile->f_path.dentry->d_inode);
+ DFS_FILE *psDFSFile = pde_data(psFile->f_path.dentry->d_inode);
loff_t iRes = -1;
_DRIVER_THREAD_ENTER();
iRes = seq_lseek(psFile, iOffset, iOrigin);
if (iRes < 0)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: filed to set file position to "
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to "
"offset %lld, pfnSeek() returned %lld", __func__,
iOffset, iRes));
goto return_;
psEntry->sImplEntry.pvPrivData);
if (iRes < 0)
{
- PVR_DPF((PVR_DBG_ERROR, "%s: filed to set file position to "
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to "
"offset %lld, pfnSeek() returned %lld", __func__,
iOffset, iRes));
goto return_;
size_t uiCount, loff_t *puiPos)
{
struct inode *psINode = psFile->f_path.dentry->d_inode;
- DFS_FILE *psDFSFile = PDE_DATA(psINode);
+ DFS_FILE *psDFSFile = pde_data(psINode);
DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb;
IMG_CHAR *pcLocalBuffer;
IMG_UINT64 ui64Count;
#ifndef _PVR_SYNC_API_H
#define _PVR_SYNC_API_H
-#include <img_types.h>
+#include "img_types.h"
int pvr_sync_api_init(void *file_handle, void **api_priv);
int pvr_sync_api_deinit(void *api_priv, bool is_sw);
TRACE_EVENT_FN(rogue_fence_update,
- TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 gpu_id, u32 ctx_id, u32 offset,
u32 sync_fwaddr, u32 sync_value),
- TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+ TP_ARGS(comm, cmd, dm, gpu_id, ctx_id, offset, sync_fwaddr, sync_value),
TP_STRUCT__entry(
__string( comm, comm )
__string( cmd, cmd )
__string( dm, dm )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, offset )
__field( u32, sync_fwaddr )
__assign_str(comm, comm);
__assign_str(cmd, cmd);
__assign_str(dm, dm);
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->offset = offset;
__entry->sync_fwaddr = sync_fwaddr;
__entry->sync_value = sync_value;
),
- TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ TP_printk("comm=%s cmd=%s dm=%s gpu=%lu ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
__get_str(comm),
__get_str(cmd),
__get_str(dm),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->offset,
- (unsigned long)__entry->sync_fwaddr,
- (unsigned long)__entry->sync_value),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->offset,
+ (unsigned long) __entry->sync_fwaddr,
+ (unsigned long) __entry->sync_value
+ ),
trace_fence_update_enabled_callback,
trace_fence_update_disabled_callback
TRACE_EVENT_FN(rogue_fence_check,
- TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 gpu_id, u32 ctx_id, u32 offset,
u32 sync_fwaddr, u32 sync_value),
- TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+ TP_ARGS(comm, cmd, dm, gpu_id, ctx_id, offset, sync_fwaddr, sync_value),
TP_STRUCT__entry(
__string( comm, comm )
__string( cmd, cmd )
__string( dm, dm )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, offset )
__field( u32, sync_fwaddr )
__assign_str(comm, comm);
__assign_str(cmd, cmd);
__assign_str(dm, dm);
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->offset = offset;
__entry->sync_fwaddr = sync_fwaddr;
__entry->sync_value = sync_value;
),
- TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ TP_printk("comm=%s cmd=%s dm=%s gpu=%lu ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
__get_str(comm),
__get_str(cmd),
__get_str(dm),
+ (unsigned long)__entry->gpu_id,
(unsigned long)__entry->ctx_id,
(unsigned long)__entry->offset,
(unsigned long)__entry->sync_fwaddr,
- (unsigned long)__entry->sync_value),
+ (unsigned long)__entry->sync_value
+ ),
trace_fence_check_enabled_callback,
trace_fence_check_disabled_callback
TRACE_EVENT(rogue_job_enqueue,
- TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
const char *kick_type),
- TP_ARGS(ctx_id, int_id, ext_id, kick_type),
+ TP_ARGS(gpu_id, ctx_id, int_id, ext_id, kick_type),
TP_STRUCT__entry(
+ __field(u32, gpu_id)
__field(u32, ctx_id)
__field(u32, int_id)
__field(u32, ext_id)
),
TP_fast_assign(
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__assign_str(kick_type, kick_type);
),
- TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s",
+ TP_printk("gpu=%lu, ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s",
+ (unsigned long) __entry->gpu_id,
(unsigned long) __entry->ctx_id,
(unsigned long) __entry->int_id,
(unsigned long) __entry->ext_id,
TRACE_EVENT(rogue_sched_switch,
- TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id,
+ TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 gpu_id, u32 next_ctx_id,
u32 next_prio, u32 next_int_id, u32 next_ext_id),
- TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id),
+ TP_ARGS(work_type, switch_type, timestamp, gpu_id, next_ctx_id, next_prio, next_int_id,
+ next_ext_id),
TP_STRUCT__entry(
__string(work_type, work_type)
__field(u32, switch_type)
__field(u64, timestamp)
+ __field(u32, gpu_id)
__field(u32, next_ctx_id)
__field(u32, next_prio)
__field(u32, next_int_id)
__assign_str(work_type, work_type);
__entry->switch_type = switch_type;
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->next_ctx_id = next_ctx_id;
__entry->next_prio = next_prio;
__entry->next_int_id = next_int_id;
__entry->next_ext_id = next_ext_id;
),
- TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu"
" next_prio=%lu work_type=%s switch_type=%s",
(unsigned long long) show_secs_from_ns(__entry->timestamp),
(unsigned long) show_usecs_from_ns(__entry->timestamp),
+ (unsigned long) __entry->gpu_id,
(unsigned long) __entry->next_ctx_id,
(unsigned long) __entry->next_int_id,
(unsigned long) __entry->next_ext_id,
TRACE_EVENT(rogue_create_fw_context,
- TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+ TP_PROTO(const char *comm, const char *dm, u32 gpu_id, u32 ctx_id),
- TP_ARGS(comm, dm, ctx_id),
+ TP_ARGS(comm, dm, gpu_id, ctx_id),
TP_STRUCT__entry(
__string( comm, comm )
__string( dm, dm )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
),
TP_fast_assign(
__assign_str(comm, comm);
__assign_str(dm, dm);
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
),
- TP_printk("comm=%s dm=%s ctx_id=%lu",
+ TP_printk("comm=%s dm=%s gpu=%lu ctx_id=%lu",
__get_str(comm),
__get_str(dm),
- (unsigned long)__entry->ctx_id)
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id
+ )
);
void PVRGpuTraceEnableUfoCallback(void);
TRACE_EVENT_FN(rogue_ufo_update,
- TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
u32 fwaddr, u32 old_value, u32 new_value),
- TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, old_value,
+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, old_value,
new_value),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, int_id )
__field( u32, ext_id )
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__entry->new_value = new_value;
),
- TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu"
" fwaddr=%#lx old_value=%#lx new_value=%#lx",
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->int_id,
- (unsigned long)__entry->ext_id,
- (unsigned long)__entry->fwaddr,
- (unsigned long)__entry->old_value,
- (unsigned long)__entry->new_value),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->int_id,
+ (unsigned long) __entry->ext_id,
+ (unsigned long) __entry->fwaddr,
+ (unsigned long) __entry->old_value,
+ (unsigned long) __entry->new_value
+ ),
+
PVRGpuTraceEnableUfoCallbackWrapper,
PVRGpuTraceDisableUfoCallback
);
TRACE_EVENT_FN(rogue_ufo_check_fail,
- TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
u32 fwaddr, u32 value, u32 required),
- TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required),
+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value, required),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, int_id )
__field( u32, ext_id )
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__entry->required = required;
),
- TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu"
" fwaddr=%#lx value=%#lx required=%#lx",
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->int_id,
- (unsigned long)__entry->ext_id,
- (unsigned long)__entry->fwaddr,
- (unsigned long)__entry->value,
- (unsigned long)__entry->required),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->int_id,
+ (unsigned long) __entry->ext_id,
+ (unsigned long) __entry->fwaddr,
+ (unsigned long) __entry->value,
+ (unsigned long) __entry->required
+ ),
+
PVRGpuTraceEnableUfoCallbackWrapper,
PVRGpuTraceDisableUfoCallback
);
TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
- TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
u32 fwaddr, u32 value, u32 required),
- TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required),
+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value, required),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, int_id )
__field( u32, ext_id )
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__entry->required = required;
),
- TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu"
" fwaddr=%#lx value=%#lx required=%#lx",
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->int_id,
- (unsigned long)__entry->ext_id,
- (unsigned long)__entry->fwaddr,
- (unsigned long)__entry->value,
- (unsigned long)__entry->required),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->int_id,
+ (unsigned long) __entry->ext_id,
+ (unsigned long) __entry->fwaddr,
+ (unsigned long) __entry->value,
+ (unsigned long) __entry->required
+ ),
+
PVRGpuTraceEnableUfoCallbackWrapper,
PVRGpuTraceDisableUfoCallback
);
TRACE_EVENT_FN(rogue_ufo_check_success,
- TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
u32 fwaddr, u32 value),
- TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value),
+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, int_id )
__field( u32, ext_id )
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__entry->value = value;
),
- TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu"
" fwaddr=%#lx value=%#lx",
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->int_id,
- (unsigned long)__entry->ext_id,
- (unsigned long)__entry->fwaddr,
- (unsigned long)__entry->value),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->int_id,
+ (unsigned long) __entry->ext_id,
+ (unsigned long) __entry->fwaddr,
+ (unsigned long) __entry->value
+ ),
+
PVRGpuTraceEnableUfoCallbackWrapper,
PVRGpuTraceDisableUfoCallback
);
TRACE_EVENT_FN(rogue_ufo_pr_check_success,
- TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id,
+ TP_PROTO(u64 timestamp, u32 gpu_id, u32 ctx_id, u32 int_id, u32 ext_id,
u32 fwaddr, u32 value),
- TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value),
+ TP_ARGS(timestamp, gpu_id, ctx_id, int_id, ext_id, fwaddr, value),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__field( u32, ctx_id )
__field( u32, int_id )
__field( u32, ext_id )
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id;
__entry->ctx_id = ctx_id;
__entry->int_id = int_id;
__entry->ext_id = ext_id;
__entry->value = value;
),
- TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu"
+ TP_printk("ts=%llu.%06lu gpu=%lu ctx_id=%lu int_id=%lu ext_id=%lu"
" fwaddr=%#lx value=%#lx",
(unsigned long long)show_secs_from_ns(__entry->timestamp),
(unsigned long)show_usecs_from_ns(__entry->timestamp),
- (unsigned long)__entry->ctx_id,
- (unsigned long)__entry->int_id,
- (unsigned long)__entry->ext_id,
- (unsigned long)__entry->fwaddr,
- (unsigned long)__entry->value),
+ (unsigned long) __entry->gpu_id,
+ (unsigned long) __entry->ctx_id,
+ (unsigned long) __entry->int_id,
+ (unsigned long) __entry->ext_id,
+ (unsigned long) __entry->fwaddr,
+ (unsigned long) __entry->value
+ ),
+
PVRGpuTraceEnableUfoCallbackWrapper,
PVRGpuTraceDisableUfoCallback
);
TRACE_EVENT(rogue_events_lost,
- TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+ TP_PROTO(u32 event_source, u32 gpu_id, u32 last_ordinal, u32 curr_ordinal),
- TP_ARGS(event_source, last_ordinal, curr_ordinal),
+ TP_ARGS(event_source, gpu_id, last_ordinal, curr_ordinal),
TP_STRUCT__entry(
__field( u32, event_source )
+ __field( u32, gpu_id )
__field( u32, last_ordinal )
__field( u32, curr_ordinal )
),
TP_fast_assign(
__entry->event_source = event_source;
+ __entry->gpu_id = gpu_id;
__entry->last_ordinal = last_ordinal;
__entry->curr_ordinal = curr_ordinal;
),
- TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+ TP_printk("event_source=%s gpu=%u last_ordinal=%u curr_ordinal=%u",
__print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+ __entry->gpu_id,
__entry->last_ordinal,
__entry->curr_ordinal)
);
TRACE_EVENT_FN(rogue_firmware_activity,
- TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+ TP_PROTO(u64 timestamp, u32 gpu_id, const char *task, u32 fw_event),
- TP_ARGS(timestamp, task, fw_event),
+ TP_ARGS(timestamp, gpu_id, task, fw_event),
TP_STRUCT__entry(
__field( u64, timestamp )
+ __field( u32, gpu_id )
__string( task, task )
__field( u32, fw_event )
),
TP_fast_assign(
__entry->timestamp = timestamp;
+ __entry->gpu_id = gpu_id,
__assign_str(task, task);
__entry->fw_event = fw_event;
),
- TP_printk("ts=%llu.%06lu task=%s event=%s",
- (unsigned long long)show_secs_from_ns(__entry->timestamp),
- (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ TP_printk("ts=%llu.%06lu gpu=%lu task=%s event=%s",
+ (unsigned long long) show_secs_from_ns(__entry->timestamp),
+ (unsigned long) show_usecs_from_ns(__entry->timestamp),
+ (unsigned long) __entry->gpu_id,
__get_str(task),
__print_symbolic(__entry->fw_event,
/* These values are from ospvr_gputrace.h. */
struct PVRSRV_CLIENT_SYNC_PRIM_TAG;
struct PVRSRV_CLIENT_SYNC_PRIM_OP;
-enum tag_img_bool;
-
#endif /* __pvrsrv_defined_struct_enum__ */
struct _PMR_;
/* srvkm.h */
enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice,
- int i32OsDeviceID,
+ int i32KernelDeviceID,
struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceDestroy(
struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
#endif
#ifndef CHECKPOINT_PFNS
-typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(
+typedef bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)(
__u32 ui32FwAddr, __u32 ui32Value);
typedef enum PVRSRV_ERROR_TAG (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void);
typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void);
enum PVRSRV_ERROR_TAG SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint);
void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
-enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
-enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
enum PVRSRV_ERROR_TAG SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
enum PVRSRV_ERROR_TAG SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
@Return struct workqueue_struct ptr on success, NULL otherwise.
*/ /**************************************************************************/
struct workqueue_struct *NativeSyncGetFenceStatusWq(void);
-struct workqueue_struct *NativeSyncGetFenceUnorderedWq(void);
#endif
#endif /* __SERVICES_KERNEL_CLIENT__ */
/* This is a helper that calls trace_rogue_fence_update for each fence in an
* array.
*/
-void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
+ IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
PRGXFWIF_UFO_ADDR *pauiAddresses,
IMG_UINT i;
for (i = 0; i < uCount; i++)
{
- trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ trace_rogue_fence_update(current->comm, cmd, dm, ui32GpuId, ui32FWContext, ui32Offset,
pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
}
}
-void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
+ IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
PRGXFWIF_UFO_ADDR *pauiAddresses,
IMG_UINT i;
for (i = 0; i < uCount; i++)
{
- trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ trace_rogue_fence_check(current->comm, cmd, dm, ui32GpuId, ui32FWContext, ui32Offset,
pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
}
}
void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
IMG_UINT i;
for (i = 0; i < ui32UFOCount; i++)
{
- trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+ trace_rogue_ufo_update(ui64OSTimestamp, ui32GpuId, ui32FWCtx,
ui32IntJobRef,
ui32ExtJobRef,
puData->sUpdate.ui32FWAddr,
}
void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
{
if (bPrEvent)
{
- trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx,
+ trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32GpuId, ui32FWCtx,
ui32IntJobRef, ui32ExtJobRef,
puData->sCheckSuccess.ui32FWAddr,
puData->sCheckSuccess.ui32Value);
}
else
{
- trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx,
+ trace_rogue_ufo_check_success(ui64OSTimestamp, ui32GpuId, ui32FWCtx,
ui32IntJobRef, ui32ExtJobRef,
puData->sCheckSuccess.ui32FWAddr,
puData->sCheckSuccess.ui32Value);
}
void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
{
if (bPrEvent)
{
- trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx,
+ trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32GpuId, ui32FWCtx,
ui32IntJobRef, ui32ExtJobRef,
puData->sCheckFail.ui32FWAddr,
puData->sCheckFail.ui32Value,
}
else
{
- trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx,
+ trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32GpuId, ui32FWCtx,
ui32IntJobRef, ui32ExtJobRef,
puData->sCheckFail.ui32FWAddr,
puData->sCheckFail.ui32Value,
bool trace_rogue_are_fence_updates_traced(void);
-void trace_job_enqueue(IMG_UINT32 ui32FWContext,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32IntJobRef,
- const char *pszKickType);
-
#if defined(SUPPORT_RGX)
void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
IMG_UINT32 *paui32Values);
void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
IMG_UINT32 *paui32Values);
void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
return false;
}
-static inline
-void trace_job_enqueue(IMG_UINT32 ui32FWContext,
- IMG_UINT32 ui32ExtJobRef,
- IMG_UINT32 ui32IntJobRef,
- const char *pszKickType)
-{
-}
-
#if defined(SUPPORT_RGX)
static inline
void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
static inline
void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWContext,
IMG_UINT32 ui32Offset,
IMG_UINT uCount,
static inline
void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
static inline
void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
static inline
void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32GpuId,
IMG_UINT32 ui32FWCtx,
IMG_UINT32 ui32ExtJobRef,
IMG_UINT32 ui32IntJobRef,
IMG_HANDLE hClientTLStream;
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/*
* Connection-based values per application which can be modified by the
* AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application.
IMG_UINT32 ui32OSid;
IMG_UINT32 ui32OSidReg;
IMG_BOOL bOSidAxiProtReg;
-#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+#endif /* defined(SUPPORT_CUSTOM_OSID_EMISSION) */
#if defined(SUPPORT_DMA_TRANSFER)
IMG_BOOL bAcceptDmaRequests;
PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+typedef struct _IMG_FLAGS2DESC_
+{
+ IMG_UINT32 uiFlag;
+ const IMG_CHAR *pszLabel;
+} IMG_FLAGS2DESC;
+
+void DebugCommonFlagStrings(IMG_CHAR *psDesc,
+ IMG_UINT32 ui32DescSize,
+ const IMG_FLAGS2DESC *psConvTable,
+ IMG_UINT32 ui32TableSize,
+ IMG_UINT32 ui32Flags);
+
#endif /* DEBUG_COMMON_H */
IMG_CHAR *pszPDumpRegName;
/* Under Linux, this is the minor number of RenderNode corresponding to this Device */
- IMG_INT32 i32OsDeviceID;
+ IMG_INT32 i32KernelDeviceID;
/* Services layer enumeration of the device used in pvrdebug */
IMG_UINT32 ui32InternalID;
} PVRSRV_DEVICE_IDENTIFIER;
} DEVICE_MEMORY_INFO;
#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
-#define DUMMY_PAGE ("DUMMY_PAGE")
-#define DEV_ZERO_PAGE ("DEV_ZERO_PAGE")
-#define PVR_DUMMY_PAGE_INIT_VALUE (0x0)
-#define PVR_ZERO_PAGE_INIT_VALUE (0x0)
typedef struct __DEFAULT_PAGE__
{
/*Page handle for the page allocated (UMA/LMA)*/
PG_HANDLE sPageHandle;
POS_LOCK psPgLock;
- ATOMIC_T atRefCounter;
/*Default page size in terms of log2 */
IMG_UINT32 ui32Log2PgSize;
IMG_UINT64 ui64PgPhysAddr;
typedef enum _PVRSRV_DEVICE_STATE_
{
PVRSRV_DEVICE_STATE_UNDEFINED = 0,
- PVRSRV_DEVICE_STATE_INIT,
+ PVRSRV_DEVICE_STATE_CREATING,
+ PVRSRV_DEVICE_STATE_CREATED,
PVRSRV_DEVICE_STATE_ACTIVE,
PVRSRV_DEVICE_STATE_DEINIT,
PVRSRV_DEVICE_STATE_BAD,
typedef struct DI_ENTRY DI_ENTRY;
#endif
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
+#ifndef DI_VZ_DATA_DEFINED
+#define DI_VZ_DATA_DEFINED
+typedef struct DI_VZ_DATA DI_VZ_DATA;
+#endif
+#endif
+
typedef struct _PVRSRV_DEVICE_DEBUG_INFO_
{
DI_GROUP *psGroup;
DI_ENTRY *psRiscvDmiDIEntry;
IMG_UINT64 ui64RiscvDmi;
#endif
+ DI_ENTRY *psDevMemEntry;
+ IMG_HANDLE hGpuUtilUserDebugFS;
#endif /* SUPPORT_RGX */
#ifdef SUPPORT_VALIDATION
DI_ENTRY *psRGXRegsEntry;
#endif /* SUPPORT_VALIDATION */
-#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS
- DI_ENTRY *psPowMonEntry;
-#endif
#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS
DI_ENTRY *psPowerDataEntry;
#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ DI_ENTRY *psPowerTimingStatsEntry;
+#endif
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ DI_GROUP *psVZGroup;
+ DI_GROUP *apsVZDriverGroups[RGX_NUM_DRIVERS_SUPPORTED];
+ DI_ENTRY *apsVZDriverPriorityDIEntries[RGX_NUM_DRIVERS_SUPPORTED];
+ DI_ENTRY *apsVZDriverIsolationGroupDIEntries[RGX_NUM_DRIVERS_SUPPORTED];
+ DI_VZ_DATA *apsVZDriverData[RGX_NUM_DRIVERS_SUPPORTED];
+#endif
} PVRSRV_DEVICE_DEBUG_INFO;
#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
/* snapshot from the last LISR invocation */
#if defined(RGX_FW_IRQ_OS_COUNTERS)
- IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_OS_SUPPORTED];
+ IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_DRIVERS_SUPPORTED];
#else
IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
#endif
IMG_UINT64 ui64Clockns;
} LISR_EXECUTION_INFO;
-#define UPDATE_LISR_DBG_STATUS(status) psDeviceNode->sLISRExecutionInfo.ui32Status = (status)
+#define UPDATE_LISR_DBG_STATUS(status) \
+ do { \
+ psDeviceNode->sLISRExecutionInfo.ui32Status = (status); \
+ if (((status) > RGX_LISR_INIT) && ((status) < RGX_LISR_PROCESSED)) \
+ { \
+ PVR_DPF((PVR_DBG_ERROR, "%s: IRQ %llu rejected: %s", __func__, (unsigned long long)psDeviceNode->ui64nLISR, #status)); \
+ } \
+ } while (0)
+
#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val)
#define UPDATE_LISR_DBG_TIMESTAMP() psDeviceNode->sLISRExecutionInfo.ui64Clockns = OSClockns64()
#define UPDATE_LISR_DBG_COUNTER() psDeviceNode->ui64nLISR++
MMU_DEVICEATTRIBS *psMMUDevAttrs;
/* Device specific MMU firmware attributes, used only in some devices */
MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs;
-
+ /* Physical Heap where MMU PT pages are allocated from, normally the
+ * system's default physical heap but can be different for AutoVz driver. */
PHYS_HEAP *psMMUPhysHeap;
/* lock for power state transitions */
callbacks the device must support:
*/
- PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
- MMU_CONTEXT *psMMUContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate);
-
PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
MMU_CONTEXT *psMMUContext,
IMG_UINT64 ui64FBSCEntries);
IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+ IMG_UINT32 (*pfnGetTFBCLossyGroup)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx);
PVRSRV_DEVICE_CONFIG *psDevConfig;
/* initialise device-specific physheaps */
PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *);
+ /* determining the appropriate LMA allocation policy */
+ PHYS_HEAP_POLICY (*pfnPhysHeapGetLMAPolicy) (PHYS_HEAP_USAGE_FLAGS);
+
/* initialise fw mmu, if FW not using GPU mmu, NULL otherwise. */
PVRSRV_ERROR (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *);
+ /* Check device's FW Main physheap free memory */
+ PVRSRV_ERROR (*pfnCheckForSufficientFWPhysMem) (struct _PVRSRV_DEVICE_NODE_ *);
+
/* information about the device's address space and heaps */
DEVICE_MEMORY_INFO sDevMemoryInfo;
void *pvDevice;
#if defined(SUPPORT_GPUVIRT_VALIDATION)
- RA_ARENA *psOSSharedArena;
+ RA_ARENA *psOSSharedArena;
RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
#endif
- /* FW_MAIN, FW_CONFIG and FW_GUEST heaps. Should be part of registered heaps? */
- PHYS_HEAP *psFWMainPhysHeap;
- PHYS_HEAP *psFWCfgPhysHeap;
- PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_OS_SUPPORTED];
-
- IMG_UINT32 ui32RegisteredPhysHeaps;
- PHYS_HEAP **papsRegisteredPhysHeaps;
-
- /* PHYS_HEAP Mapping table to the platform's physical memory heap(s)
- * used by this device. The physical heaps are created based on
- * the PHYS_HEAP_CONFIG data from the platform's system layer at device
- * creation time.
- *
- * Contains PVRSRV_PHYS_HEAP_LAST entries for all the possible physical heaps allowed in the design.
- * It allows the system layer PhysHeaps for the device to be identified for use in creating new PMRs.
- * See PhysHeapCreatePMR()
+ /* When virtualisation support is enabled the Firmware heaps of virtualised
+ * drivers can be entirely premapped into the Fw's VA space, during init
+ * or during runtime on explicit request from Guest drivers. */
+ PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_DRIVERS_SUPPORTED];
+
+ /* Head of the physical heap list. Tracks PhysHeap objects created from
+ * the PHYS_HEAP_CONFIG definitions supplied by the system layer at
+ * device creation time. There could be 1 or more and varies from system
+ * to system.
+ */
+ struct _PHYS_HEAP_ *psPhysHeapList;
+ POS_LOCK hPhysHeapLock;
+
+ /* The apsPhysHeap array is a mapping table to the system's, often fewer,
+ * physical memory heaps defined for this device. It contains
+ * PVRSRV_PHYS_HEAP_LAST entries, one for each possible physical
+ * heaps allowed in the design. Each PhysHeap in the design is acquired
+ * and stored in the mapping table during device create. Fall-back logic
+ * is employed to ensure a valid heap is always found from the set defined
+ * in the system layer for the device. Responsibility for this is shared
+ * between the common layer (PhysHeapInitDeviceHeaps) and sub-device
+ * layer (pfnPhysMemDeviceHeapsInit).
+ * It is used in the PhysMem module to create PMRs from a given PhysHeap
+ * of memory. See PhysHeapCreatePMR()
*/
PHYS_HEAP *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST];
IMG_UINT32 ui32UserAllocHeapCount;
-#if defined(SUPPORT_AUTOVZ)
- /* Phys Heap reserved for storing the MMU mappings of firmware.
- * The memory backing up this Phys Heap must persist between driver or OS reboots */
- PHYS_HEAP *psFwMMUReservedPhysHeap;
-#endif
-
/* Flag indicating if the firmware has been initialised during the
* 1st boot of the Host driver according to the AutoVz life-cycle. */
IMG_BOOL bAutoVzFwIsUp;
+ /* Flags indicating VM state and if PVZ calls originating from it are valid */
+ IMG_UINT32 ui32VmState;
+
struct _PVRSRV_DEVICE_NODE_ *psNext;
struct _PVRSRV_DEVICE_NODE_ **ppsThis;
#endif
PVRSRV_DEVICE_DEBUG_INFO sDebugInfo;
+ IMG_BOOL bEnablePFDebug; /*!< EnablePageFaultDebug AppHint setting for device */
+
+ IMG_UINT32 ui32RGXLog2Non4KPgSize; /* Page size of Non4k heap in log2 form */
} PVRSRV_DEVICE_NODE;
/*
struct _PVRSRV_DEVICE_NODE_;
struct _CONNECTION_DATA_;
-
+struct _DEVMEMINT_HEAP_;
+
+/*************************************************************************/ /*!
+@Function Callback function PFN_HEAP_INIT
+@Description Device heap initialisation function. Called in server devmem
+ heap create if the callback pointer in RGX_HEAP_INFO is
+ not NULL.
+@Input psDeviceNode The device node.
+@Input psDevmemHeap Server internal devmem heap.
+@Output phPrivData Private data handle. Allocated resources
+ can be freed in PFN_HEAP_DEINIT.
+@Return PVRSRV_ERROR PVRSRV_OK or error code
+*/ /**************************************************************************/
+typedef PVRSRV_ERROR (*PFN_HEAP_INIT)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ struct _DEVMEMINT_HEAP_ *psDevmemHeap,
+ IMG_HANDLE *phPrivData);
+
+/*************************************************************************/ /*!
+@Function Callback function PFN_HEAP_DEINIT
+@Description Device heap deinit function. Called in server devmem
+ heap destroy if the callback pointer in RGX_HEAP_INFO is
+ not NULL.
+@Input hPrivData Private data handle. To free any resources.
+*/ /**************************************************************************/
+typedef void (*PFN_HEAP_DEINIT)(IMG_HANDLE hPrivData);
/*
A "heap config" is a blueprint to be used for initial setting up of heaps
aligned to at least this value */
IMG_UINT32 uiLog2ImportAlignment;
+ /* Callback function for device specific heap init. */
+ PFN_HEAP_INIT pfnInit;
+
+ /* Callback function for device specific heap deinit. */
+ PFN_HEAP_DEINIT pfnDeInit;
+
} DEVMEM_HEAP_BLUEPRINT;
void HeapCfgBlueprintInit(const IMG_CHAR *pszName,
IMG_DEVMEM_SIZE_T uiReservedRegionLength,
IMG_UINT32 ui32Log2DataPageSize,
IMG_UINT32 uiLog2ImportAlignment,
+ PFN_HEAP_INIT pfnInit,
+ PFN_HEAP_DEINIT pfnDeInit,
DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint);
/* Entire named heap config */
IMG_UINT32 *puiLog2ImportAlignmentOut
);
+PVRSRV_ERROR
+HeapCfgGetCallbacks(const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ PFN_HEAP_INIT *ppfnInit,
+ PFN_HEAP_DEINIT *ppfnDeinit);
+
#endif
void DevicememHistoryDeInitKM(void);
+PVRSRV_ERROR DevicememHistoryDeviceInit(PVRSRV_DEVICE_NODE *psDevNode);
+PVRSRV_ERROR DevicememHistoryDeviceCreate(PVRSRV_DEVICE_NODE *psDevNode);
+void DevicememHistoryDeviceDestroy(PVRSRV_DEVICE_NODE *psDevNode);
+
PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
IMG_UINT32 ui32Offset,
IMG_DEV_VIRTADDR sDevVAddr,
{
IMG_PID uiPID;
IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_DEVICE_NODE *psDevNode;
} DEVICEMEM_HISTORY_QUERY_IN;
/* Store up to 4 results for a lookup. In the case of the faulting page being
typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
{
IMG_UINT32 ui32NumResults;
+ IMG_UINT64 ui64SearchCount;
/* result 0 is the newest */
DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
} DEVICEMEM_HISTORY_QUERY_OUT;
+void DevicememHistoryDumpRecordStats(PVRSRV_DEVICE_NODE *psDevNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
IMG_BOOL
DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMXINT_RESERVATION_ DEVMEMXINT_RESERVATION;
typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
-
-/*************************************************************************/ /*!
-@Function DevmemIntUnpin
-@Description This is the counterpart to DevmemPin(). It is meant to be
- called when the allocation is NOT mapped in the device virtual
- space.
-
-@Input psPMR The physical memory to unpin.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
- registered to be reclaimed. Error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
-
-/*************************************************************************/ /*!
-@Function DevmemIntUnpinInvalidate
-@Description This is the counterpart to DevmemIntPinValidate(). It is meant
- to be called for allocations that ARE mapped in the device
- virtual space and we have to invalidate the mapping.
-
-@Input psPMR The physical memory to unpin.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
- registered to be reclaimed. Error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR
-DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
-
-/*************************************************************************/ /*!
-@Function DevmemIntPin
-@Description This is the counterpart to DevmemIntUnpin().
- Is meant to be called if there is NO device mapping present.
-
-@Input psPMR The physical memory to pin.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
- was successfully restored.
-
- PVRSRV_ERROR_PMR_NEW_MEMORY when the content
- could not be restored and new physical memory
- was allocated.
-
- A different error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
-
-/*************************************************************************/ /*!
-@Function DevmemIntPinValidate
-@Description This is the counterpart to DevmemIntUnpinInvalidate().
- Is meant to be called if there is IS a device mapping present
- that needs to be taken care of.
-
-@Input psDevmemMapping The mapping structure used for the passed PMR.
-
-@Input psPMR The physical memory to pin.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
- was successfully restored.
-
- PVRSRV_ERROR_PMR_NEW_MEMORY when the content
- could not be restored and new physical memory
- was allocated.
-
- A different error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR
-DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
/*
* DevmemServerGetImportHandle()
*
DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx,
IMG_HANDLE *phPrivData);
-/*
- * DevmemIntAllocDefBackingPage
- *
- * This function allocates default backing page and initializes it
- * with a given default value
- *
- */
-PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_DEF_PAGE *psDefPage,
- IMG_INT uiInitValue,
- IMG_CHAR *pcDefPageName,
- IMG_BOOL bInitPage);
-/*
- * DevmemIntFreeDefBackingPage
- *
- * Frees a given page
- */
-void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_DEF_PAGE *psDefPage,
- IMG_CHAR *pcDefPageName);
-
-
/*
* DevmemIntCtxCreate()
*
*/
PVRSRV_ERROR
DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
IMG_DEV_VIRTADDR sHeapBaseAddr,
- IMG_DEVMEM_SIZE_T uiHeapLength,
IMG_UINT32 uiLog2DataPageSize,
DEVMEMINT_HEAP **ppsDevmemHeapPtr);
/*
PVRSRV_ERROR
DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
+/* DevmemIntHeapGetBaseAddr()
+ *
+ * Get heap base address pre carveouts.
+ */
+IMG_DEV_VIRTADDR
+DevmemIntHeapGetBaseAddr(DEVMEMINT_HEAP *psDevmemHeap);
+
/*
* DevmemIntMapPMR()
*
PVRSRV_ERROR
DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
+/*************************************************************************/ /*!
+ * @Function DevmemXIntReserveRange()
+ * @Description Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemXIntReserveRange() (and the call succeeds)
+ * then you are promising that you shall later call
+ * DevmemXIntUnreserveRange().
+ *
+ * @Input psDevmemHeap Pointer to the heap the reservation is made
+ * on
+ * @Input sAllocationDevVAddr Virtual address of the reservation
+ * @Input uiAllocationSize Size of the reservation (in bytes)
+ * @Input ppsRsrv Return pointer to the reservation object
+ *
+ * @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemXIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMXINT_RESERVATION **ppsRsrv);
+
+/*************************************************************************/ /*!
+ * @Function DevmemXIntUnreserveRange()
+ * @Description Undoes the state change caused by DevmemXIntReserveRange()
+ *
+ * @Input psRsrv Reservation handle for the range
+ *
+ * @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemXIntUnreserveRange(DEVMEMXINT_RESERVATION *psRsrv);
+
+/*************************************************************************/ /*!
+ * @Function DevmemXIntMapPages()
+ * @Description Maps an arbitrary number of pages from a PMR to a reserved range
+ * and takes references on the PMR.
+ *
+ * @Input psRsrv Reservation handle for the range
+ * @Input psPMR PMR that is mapped
+ * @Input uiPageCount Number of consecutive pages that are
+ * mapped
+ * @Input uiPhysPageOffset Logical offset in the PMR (measured in pages)
+ * @Input uiFlags Mapping flags
+ * @Input uiVirtPageOffset Offset from the reservation base to start the
+ * mapping from (measured in pages)
+ *
+ * @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemXIntMapPages(DEVMEMXINT_RESERVATION *psRsrv,
+ PMR *psPMR,
+ IMG_UINT32 uiPageCount,
+ IMG_UINT32 uiPhysPageOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiVirtPageOffset);
+
+/*************************************************************************/ /*!
+ * @Function DevmemXIntUnmapPages()
+ * @Description Unmaps an arbitrary number of pages from a reserved range and
+ * releases references on associated PMRs.
+ *
+ * @Input psRsrv Reservation handle for the range
+ * @Input uiVirtPageOffset Offset from the reservation base to start the
+ * mapping from (measured in pages)
+ * @Input uiPageCount Number of consecutive pages that are
+ * unmapped
+ *
+ * @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemXIntUnmapPages(DEVMEMXINT_RESERVATION *psRsrv,
+ IMG_UINT32 uiVirtPageOffset,
+ IMG_UINT32 uiPageCount);
+
/*************************************************************************/ /*!
@Function DevmemIntChangeSparse
@Description Changes the sparse allocations of a PMR by allocating and freeing
IMG_DEV_VIRTADDR sDevVAddrBase,
IMG_UINT64 sCpuVAddrBase);
-/*
- * DevmemIntFlushDevSLCRange()
- *
- * Flush specified device context's virtual address range from SLC.
- */
-PVRSRV_ERROR
-DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate);
-
/*
* DevmemIntRGXInvalidateFBSCTable()
*
@Description Registers a PID to be notified when a page fault occurs on a
specific device memory context.
@Input psDevmemCtx The context to be notified about.
-@Input ui32PID The PID of the process that would like to be
- notified.
@Input bRegister If true, register. If false, de-register.
@Return PVRSRV_ERROR
*/ /**************************************************************************/
PVRSRV_ERROR
DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
- IMG_INT32 ui32PID,
IMG_BOOL bRegister);
/*************************************************************************/ /*!
* the given virtual address.
*/
PVRSRV_ERROR
-DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevmemCtx,
IMG_DEV_VIRTADDR sDevAddrStart,
IMG_DEVMEM_SIZE_T uiSize,
IMG_UINT32 uiArraySize,
#pragma inline(DevmemIntPDumpSaveToFileVirtual)
#endif
static INLINE PVRSRV_ERROR
-DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+DevmemIntPDumpSaveToFileVirtual(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevmemCtx,
IMG_DEV_VIRTADDR sDevAddrStart,
IMG_DEVMEM_SIZE_T uiSize,
IMG_UINT32 uiArraySize,
IMG_UINT32 ui32FileOffset,
IMG_UINT32 ui32PDumpFlags)
{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
PVR_UNREFERENCED_PARAMETER(uiSize);
IMG_UINT64 *puiAddress,
IMG_DEVMEM_OFFSET_T *puiOffset,
IMG_DEVMEM_SIZE_T *puiSize,
- IMG_BOOL bMemToDev,
+ IMG_UINT32 uiFlags,
PVRSRV_TIMELINE iUpdateTimeline);
PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode);
HANDLETYPE(DEVMEMINT_CTX_EXPORT)
HANDLETYPE(DEVMEMINT_HEAP)
HANDLETYPE(DEVMEMINT_RESERVATION)
+HANDLETYPE(DEVMEMXINT_RESERVATION)
HANDLETYPE(DEVMEMINT_MAPPING)
HANDLETYPE(RGX_FW_MEMDESC)
HANDLETYPE(RGX_FREELIST)
#include "img_types.h"
#include "pvrsrv_error.h"
#include "pvrsrv.h"
-#include "htbuffer.h"
+
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(SF, ## args); } while (0)
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff))
+
+/* Host Trace Buffer name */
+#define HTB_STREAM_NAME "PVRHTBuffer"
/************************************************************************/ /*!
@Function HTBInit
PVRSRV_ERROR
HTBDeInit(void);
-/*************************************************************************/ /*!
- @Function HTBConfigureKM
- @Description Configure or update the configuration of the Host Trace Buffer
-
- @Input ui32NameSize Size of the pszName string
-
- @Input pszName Name to use for the underlying data buffer
-
- @Input ui32BufferSize Size of the underlying data buffer
-
- @Return eError Internal services call returned eError error
- number
-*/ /**************************************************************************/
-PVRSRV_ERROR
-HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName,
- const IMG_UINT32 ui32BufferSize);
-
-
/*************************************************************************/ /*!
@Function HTBControlKM
@Description Update the configuration of the Host Trace Buffer
const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd);
/*************************************************************************/ /*!
- @Function HTBLogKM
- @Description Record a Host Trace Buffer log event
-
- @Input PID The PID of the process the event is associated
- with. This is provided as an argument rather
- than querying internally so that events associated
- with a particular process, but performed by
- another can be logged correctly.
-
- @Input TID The TID of the process the event is associated with.
-
- @Input ui64TimeStamp The timestamp to be associated with this log event
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
@Input SF The log event ID
@Return PVRSRV_OK Success.
*/ /**************************************************************************/
-PVRSRV_ERROR
-HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF,
- IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args);
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_UINT32 SF, ...);
+
+/* DEBUG log group enable */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG __BUILDERROR__
+#endif
/*************************************************************************/ /*!
@Function HTBIsConfigured
IMG_UINT32 uiMaxRefCount;
} MMU_PAGESIZECONFIG;
+/*************************************************************************/ /*!
+@Function MMU_InitDevice
+
+@Description Creates MMU device specific resources.
+
+@Input psDevNode Device node of the device to create the
+ MMU context for
+
+@Return PVRSRV_OK if the initialisation process was successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_InitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/*************************************************************************/ /*!
+@Function MMU_DeInitDevice
+
+@Description Clean-up MMU device specific resources.
+
+@Input psDevNode Device node of the device
+
+@Return None
+*/
+/*****************************************************************************/
+void MMU_DeInitDevice(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
/*************************************************************************/ /*!
@Function MMU_ContextCreate
IMG_UINT32 ui32PageCount,
IMG_UINT32 uiLog2PageSize);
-/*************************************************************************/ /*!
-@Function MMU_ChangeValidity
-
-@Description Sets or unsets the valid bit of page table entries for a given
- address range.
-
-@Input psMMUContext MMU context to operate on
-
-@Input sDevVAddr The device virtual base address of
- the range we want to modify
-
-@Input uiSizeBytes The size of the range in bytes
-
-@Input uiLog2PageSize Log2 of the used page size
-
-@Input bMakeValid Choose to set or unset the valid bit.
- (bMakeValid == IMG_TRUE ) -> SET
- (bMakeValid == IMG_FALSE) -> UNSET
-
-@Input psPMR The PMR backing the allocation.
- Needed in case we have sparse memory
- where we have to check whether a physical
- address actually backs the virtual.
-
-@Return PVRSRV_OK if successful
-*/
-/*****************************************************************************/
-PVRSRV_ERROR
-MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSizeBytes,
- IMG_UINT32 uiLog2PageSize,
- IMG_BOOL bMakeValid,
- PMR *psPMR);
-
/*************************************************************************/ /*!
@Function MMU_AcquireBaseAddr
void
MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
-#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#if defined(SUPPORT_CUSTOM_OSID_EMISSION)
/***********************************************************************************/ /*!
@Function MMU_SetOSid
#ifndef OSDI_IMPL_H
#define OSDI_IMPL_H
-#include <linux/stdarg.h>
+#if defined(__linux__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
#include "di_common.h"
#include "pvrsrv_error.h"
#endif
#endif
-#include <linux/stdarg.h>
+#if defined(__linux__) && defined(__KERNEL__)
+ #include <linux/version.h>
+
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ #include <linux/stdarg.h>
+ #else
+ #include <stdarg.h>
+ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
+#else
+ #include <stdarg.h>
+#endif /* __linux__ */
#if defined(__QNXNTO__)
#include <stdio.h>
*/ /**************************************************************************/
PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
+/*************************************************************************/ /*!
+@Function OSIsMapPhysNonContigSupported
+@Description Determine if the kernel mapping of physically non-contiguous
+ pages is supported in the OS layer
+
+ Note: For this function to return IMG_TRUE a full implementation
+ of the following functions is required:
+ OSMapPhysArrayToLin
+ OSUnMapPhysArrayToLin
+
+@Return IMG_BOOL
+*/ /**************************************************************************/
+IMG_BOOL OSIsMapPhysNonContigSupported(void);
+
+/*************************************************************************/ /*!
+@Function OSUnMapPhysArrayToLin
+@Description UnMap a kernel virtual address that was produced by mapping
+ a number of Pages in OSMapPhysArrayToLin.
+
+ Note: This function is only required if the Non contiguous
+ allocation feature is required, in this case
+ OSIsMapPhysNonContigSupported should return IMG_TRUE.
+                  If not required this function should be
+                  implemented as a no-op (it has no return value)
+
+@Input pvLinAddr The linear mapping to be unmapped
+@Input pvPrivData Optional implementation specific data.
+
+@Return None
+*/ /**************************************************************************/
+void OSUnMapPhysArrayToLin(void *pvLinAddr, void *pvPrivData);
+
+/*************************************************************************/ /*!
+@Function OSMapPhysArrayToLin
+@Description Given an array of OS page physical addresses and a count
+ of said Pages, this function will map those pages into a
+ virtually contiguous range, this allows for non physically
+ contiguous allocations to be mapped into the kernel.
+ Page size is assumed to be OS page size.
+
+ Note: This function is only required if the Non contiguous
+ allocation feature is required, in this case
+ OSIsMapPhysNonContigSupported should return IMG_TRUE.
+ If not required this function should return
+ PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED
+
+@Input pPagePA Array of Pages
+@Input uiPageCount Page count of pulPages
+@Output ppvLinAddr Pointer to a virtual kernel address of the
+ mapped Pages.
+@Output ppvPrivData Optional implementation specific data.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSMapPhysArrayToLin(IMG_CPU_PHYADDR pPagePA[],
+ IMG_UINT32 uiPageCount,
+ void **ppvLinAddr,
+ void **ppvPrivData);
+
/*************************************************************************/ /*!
@Function OSMapPhysToLin
@Description Maps physical memory into a linear address range.
*/ /**************************************************************************/
void OSDeInitEnvData(void);
-/*************************************************************************/ /*!
-@Function OSVSScanf
-@Description OS function to support the standard C vsscanf() function.
-*/ /**************************************************************************/
-IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
-
/*************************************************************************/ /*!
@Function OSStringLCat
@Description OS function to support the BSD C strlcat() function.
OSWriteMemoryBarrier(addr); \
} while (0)
-#if defined(__linux__) && defined(__KERNEL__) && !defined(NO_HARDWARE)
- #define OSReadHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off)))
- #define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off)))
- #define OSReadHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off)))
+#if defined(NO_HARDWARE)
+ /* OSReadHWReg operations skipped in no hardware builds */
+ #define OSReadUncheckedHWReg8(addr, off) ((void)(addr), 0x4eU)
+ #define OSReadUncheckedHWReg16(addr, off) ((void)(addr), 0x3a4eU)
+ #define OSReadUncheckedHWReg32(addr, off) ((void)(addr), 0x30f73a4eU)
+#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8
+ /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */
+ #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL)
+#else
+ #define OSReadUncheckedHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL)
+#endif
+
+ #define OSWriteUncheckedHWReg8(addr, off, val)
+ #define OSWriteUncheckedHWReg16(addr, off, val)
+ #define OSWriteUncheckedHWReg32(addr, off, val)
+ #define OSWriteUncheckedHWReg64(addr, off, val) ((void)(val))
+
+ #define OSReadHWReg8(addr, off) OSReadUncheckedHWReg8(addr, off)
+ #define OSReadHWReg16(addr, off) OSReadUncheckedHWReg16(addr, off)
+ #define OSReadHWReg32(addr, off) OSReadUncheckedHWReg32(addr, off)
+ #define OSReadHWReg64(addr, off) OSReadUncheckedHWReg64(addr, off)
+
+ #define OSWriteHWReg8(addr, off, val) OSWriteUncheckedHWReg8(addr, off, val)
+ #define OSWriteHWReg16(addr, off, val) OSWriteUncheckedHWReg16(addr, off, val)
+ #define OSWriteHWReg32(addr, off, val) OSWriteUncheckedHWReg32(addr, off, val)
+ #define OSWriteHWReg64(addr, off, val) OSWriteUncheckedHWReg64(addr, off, val)
+
+#else
+
+#if defined(__linux__) && defined(__KERNEL__)
+ #define OSReadUncheckedHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off)))
+ #define OSReadUncheckedHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off)))
+ #define OSReadUncheckedHWReg32(addr, off) ((IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off)))
/* Little endian support only */
- #define OSReadHWReg64(addr, off) \
+ #define OSReadUncheckedHWReg64(addr, off) \
({ \
__typeof__(addr) _addr = addr; \
__typeof__(off) _off = off; \
); \
})
- #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off))
- #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off))
- #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ #define OSWriteUncheckedHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ #define OSWriteUncheckedHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ #define OSWriteUncheckedHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off))
/* Little endian support only */
- #define OSWriteHWReg64(addr, off, val) do \
+ #define OSWriteUncheckedHWReg64(addr, off, val) do \
{ \
__typeof__(addr) _addr = addr; \
__typeof__(off) _off = off; \
writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \
} while (0)
-
-#elif defined(NO_HARDWARE)
- /* OSReadHWReg operations skipped in no hardware builds */
- #define OSReadHWReg8(addr, off) ((void)(addr), 0x4eU)
- #define OSReadHWReg16(addr, off) ((void)(addr), 0x3a4eU)
- #define OSReadHWReg32(addr, off) ((void)(addr), 0x30f73a4eU)
-#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8
- /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */
- #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL)
-#else
- #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL)
-#endif
-
- #define OSWriteHWReg8(addr, off, val)
- #define OSWriteHWReg16(addr, off, val)
- #define OSWriteHWReg32(addr, off, val)
- #define OSWriteHWReg64(addr, off, val)
-
-#else
+#else /* defined(__linux__) && defined(__KERNEL__) */
/*************************************************************************/ /*!
-@Function OSReadHWReg8
+@Function OSReadUncheckedHWReg8
@Description Read from an 8-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
the register to be read.
@Return The byte read.
*/ /**************************************************************************/
- IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+ IMG_UINT8 OSReadUncheckedHWReg8(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset);
/*************************************************************************/ /*!
-@Function OSReadHWReg16
+@Function OSReadUncheckedHWReg16
@Description Read from a 16-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
the register to be read.
@Return The word read.
*/ /**************************************************************************/
- IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+ IMG_UINT16 OSReadUncheckedHWReg16(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset);
/*************************************************************************/ /*!
-@Function OSReadHWReg32
+@Function OSReadUncheckedHWReg32
@Description Read from a 32-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
the register to be read.
@Return The long word read.
*/ /**************************************************************************/
- IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+ IMG_UINT32 OSReadUncheckedHWReg32(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset);
/*************************************************************************/ /*!
-@Function OSReadHWReg64
+@Function OSReadUncheckedHWReg64
@Description Read from a 64-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
the register to be read.
@Return The long long word read.
*/ /**************************************************************************/
- IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+ IMG_UINT64 OSReadUncheckedHWReg64(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset);
/*************************************************************************/ /*!
-@Function OSWriteHWReg8
+@Function OSWriteUncheckedHWReg8
@Description Write to an 8-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
@Input ui8Value The byte to be written to the register.
@Return None.
*/ /**************************************************************************/
- void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+ void OSWriteUncheckedHWReg8(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
/*************************************************************************/ /*!
-@Function OSWriteHWReg16
+@Function OSWriteUncheckedHWReg16
@Description Write to a 16-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
@Input ui16Value The word to be written to the register.
@Return None.
*/ /**************************************************************************/
- void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+ void OSWriteUncheckedHWReg16(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT16 ui16Value);
/*************************************************************************/ /*!
-@Function OSWriteHWReg32
+@Function OSWriteUncheckedHWReg32
@Description Write to a 32-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
@Input ui32Value The long word to be written to the register.
@Return None.
*/ /**************************************************************************/
- void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+ void OSWriteUncheckedHWReg32(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
/*************************************************************************/ /*!
-@Function OSWriteHWReg64
+@Function OSWriteUncheckedHWReg64
@Description Write to a 64-bit memory-mapped device register.
The implementation should not permit the compiler to
reorder the I/O sequence.
register.
@Return None.
*/ /**************************************************************************/
- void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
-#endif
+ void OSWriteUncheckedHWReg64(volatile void *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT64 ui64Value);
+
+#endif /* defined(__linux__) && defined(__KERNEL__) */
+
+#if !defined(DOXYGEN)
+ /* systems using real hardware must check that regular register
+ * operations don't attempt to access secure registers */
+ static INLINE IMG_UINT8 OSReadHWReg8(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ return OSReadUncheckedHWReg8(pvLinRegBaseAddr, ui32Offset);
+ }
+
+ static INLINE IMG_UINT16 OSReadHWReg16(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ return OSReadUncheckedHWReg16(pvLinRegBaseAddr, ui32Offset);
+ }
+
+ static INLINE IMG_UINT32 OSReadHWReg32(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ return OSReadUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset);
+ }
+
+ static INLINE IMG_UINT64 OSReadHWReg64(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ return OSReadUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset);
+ }
+
+ static INLINE void OSWriteHWReg8(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT8 ui8Value)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSWriteUncheckedHWReg8(pvLinRegBaseAddr, ui32Offset, ui8Value);
+ }
+
+ static INLINE void OSWriteHWReg16(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT16 ui16Value)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSWriteUncheckedHWReg16(pvLinRegBaseAddr, ui32Offset, ui16Value);
+ }
+
+ static INLINE void OSWriteHWReg32(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSWriteUncheckedHWReg32(pvLinRegBaseAddr, ui32Offset, ui32Value);
+ }
+
+ static INLINE void OSWriteHWReg64(volatile void __iomem *pvLinRegBaseAddr,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT64 ui64Value)
+ {
+ PVR_ASSERT((ui32Offset) < RGX_HOST_SECURE_REGBANK_OFFSET);
+ OSWriteUncheckedHWReg64(pvLinRegBaseAddr, ui32Offset, ui64Value);
+ }
+
+#endif /* !defined(DOXYGEN) */
+#endif /* defined(NO_HARDWARE) */
/*************************************************************************/ /*!
@Description Pointer to a timer callback function.
#else
-/*! Function not implemented definition. */
+#if !defined(DOXYGEN)
#define OSFUNC_NOT_IMPLEMENTED 0
-/*! Assert used for OSFUNC_NOT_IMPLEMENTED. */
#define OSFUNC_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSFUNC_NOT_IMPLEMENTED)
+#endif
/*************************************************************************/ /*!
@Function OSWRLockCreate
@Description Divide a 64-bit value by a 32-bit value. Return a 32-bit
quotient.
The remainder is also returned in 'pui32Remainder'.
- This function allows for a more optional implementation
+ This function allows for a more optimal implementation
of a 64-bit division when the result is known to be
representable in 32-bits.
@Input ui64Divident The number to be divided.
void *pvOSData, IMG_HANDLE pvServerCleanupParam,
PFN_SERVER_CLEANUP pfnServerCleanup);
#endif
+#if defined(SUPPORT_SECURE_ALLOC_KM)
+PVRSRV_ERROR
+OSAllocateSecBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszName,
+ PMR **ppsPMR);
+
+void
+OSFreeSecBuf(PMR *psPMR);
+#endif
#endif /* OSFUNC_H */
/******************************************************************************
@Output value Pointer to returned app hint value.
*/ /**************************************************************************/
#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \
- PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+ PVRSRVGetAppHint(state, # name, IMG_BOOL_TYPE, appHintDefault, value)
/**************************************************************************/ /*!
@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size)
#include "rgx_hwperf.h"
#include "device.h"
-#if defined(__linux__)
+#if defined(__linux__) && defined(CONFIG_EVENT_TRACING)
void PVRGpuTraceEnqueueEvent(
PVRSRV_DEVICE_NODE *psDevNode,
void PVRGpuTraceEnableFirmwareActivityCallback(void);
void PVRGpuTraceDisableFirmwareActivityCallback(void);
-#else /* defined(__linux__) */
+#if defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD)
+PVRSRV_ERROR
+PVRSRVGpuTraceWorkPeriodEventStatsRegister(IMG_HANDLE*
+ phGpuWorkPeriodEventStats);
+void PVRSRVGpuTraceWorkPeriodEventStatsUnregister(
+ IMG_HANDLE hGpuWorkPeriodEventStats);
+#endif /* defined(PVRSRV_ANDROID_TRACE_GPU_WORK_PERIOD) */
+
+#else /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */
static inline void PVRGpuTraceEnqueueEvent(
PVRSRV_DEVICE_NODE *psDevNode,
static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {}
static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {}
-#endif /* defined(__linux__) */
+#endif /* defined(__linux__) && defined(CONFIG_EVENT_TRACING) */
#endif /* PVR_GPUTRACE_H_ */
#undef PDUMP_TRACE
#if defined(PDUMP_TRACE)
-#define PDUMP_HERE_VAR IMG_UINT32 here = 0;
+#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0;
#define PDUMP_HERE(a) { here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); }
#define PDUMP_HEREA(a) { here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); }
#else
-#define PDUMP_HERE_VAR IMG_UINT32 here = 0;
+#define PDUMP_HERE_VAR __maybe_unused IMG_UINT32 here = 0;
#define PDUMP_HERE(a) here = (a);
#define PDUMP_HEREA(a) here = (a);
#endif
/* Shared across pdump_x files */
PVRSRV_ERROR PDumpInitCommon(void);
void PDumpDeInitCommon(void);
+PVRSRV_ERROR PDumpValidateUMFlags(PDUMP_FLAGS_T uiFlags);
PVRSRV_ERROR PDumpReady(void);
void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
size_t *puiZeroPageSize,
IMG_UINT32 ui32Start,
IMG_UINT32 ui32End,
IMG_UINT32 ui32Interval,
- IMG_UINT32 ui32MaxParamFileSize);
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout);
PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode,
/* Register the connection with the PDump subsystem */
PVRSRV_ERROR
-PDumpRegisterConnection(void *hSyncPrivData,
+PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ void *hSyncPrivData,
PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
/* Unregister the connection with the PDump subsystem */
void
-PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData);
/* Register for notification of PDump Transition into/out of capture range */
PVRSRV_ERROR
IMG_UINT32 ui32Start,
IMG_UINT32 ui32End,
IMG_UINT32 ui32Interval,
- IMG_UINT32 ui32MaxParamFileSize)
+ IMG_UINT32 ui32MaxParamFileSize,
+ IMG_UINT32 ui32AutoTermTimeout)
{
PVR_UNREFERENCED_PARAMETER(psConnection);
PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(ui32End);
PVR_UNREFERENCED_PARAMETER(ui32Interval);
PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+ PVR_UNREFERENCED_PARAMETER(ui32AutoTermTimeout);
return PVRSRV_OK;
}
#pragma inline(PDumpRegisterConnection)
#endif
static INLINE PVRSRV_ERROR
-PDumpRegisterConnection(void *hSyncPrivData,
+PDumpRegisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ void *hSyncPrivData,
PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(hSyncPrivData);
PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks);
PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
#pragma inline(PDumpUnregisterConnection)
#endif
static INLINE void
-PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+PDumpUnregisterConnection(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData)
{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
}
#include "opaque_types.h"
-/*
- * PDUMP MMU attributes
- */
-typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
-{
- /* Per-Device Pdump attribs */
-
- /*!< Pdump memory bank name */
- IMG_CHAR *pszPDumpMemDevName;
-
- /*!< Pdump register bank name */
- IMG_CHAR *pszPDumpRegDevName;
-
-} PDUMP_MMU_ATTRIB_DEVICE;
-
-typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
-{
- IMG_UINT32 ui32Dummy;
-} PDUMP_MMU_ATTRIB_CONTEXT;
-
-typedef struct _PDUMP_MMU_ATTRIB_HEAP_
-{
- /* data page info */
- IMG_UINT32 ui32DataPageMask;
-} PDUMP_MMU_ATTRIB_HEAP;
-
-typedef struct _PDUMP_MMU_ATTRIB_
-{
- struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
- struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
- struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
-} PDUMP_MMU_ATTRIB;
-
#if defined(PDUMP)
PVRSRV_ERROR
PDumpMMUMalloc(PPVRSRV_DEVICE_NODE psDeviceNode,
page-size. */
IMG_DEVMEM_ALIGN_T uiAlign,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phHandlePtr,
IMG_UINT32 ui32PDumpFlags);
+PVRSRV_ERROR
+PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ /* alignment is alignment of start of buffer _and_
+ minimum contiguity - i.e. smallest allowable
+ page-size. */
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT8 ui8InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags);
+
PVRSRV_ERROR
PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
IMG_HANDLE hPDumpAllocationInfoHandle);
+PVRSRV_ERROR
+PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hPDumpAllocationInfoHandle);
+
void
PDumpMakeStringValid(IMG_CHAR *pszString,
IMG_UINT32 ui32StrLen);
}
static INLINE PVRSRV_ERROR
-PDumpMalloc(const IMG_CHAR *pszDevSpace,
+PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_CHAR *pszDevSpace,
const IMG_CHAR *pszSymbolicAddress,
IMG_UINT64 ui64Size,
IMG_DEVMEM_ALIGN_T uiAlign,
IMG_BOOL bInitialise,
- IMG_UINT32 ui32InitValue,
+ IMG_UINT8 ui8InitValue,
IMG_HANDLE *phHandlePtr,
IMG_UINT32 ui32PDumpFlags)
{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+ PVR_UNREFERENCED_PARAMETER(ui64Size);
+ PVR_UNREFERENCED_PARAMETER(uiAlign);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui8InitValue);
+ PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMallocUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT8 ui8InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(pszDevSpace);
PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
PVR_UNREFERENCED_PARAMETER(ui64Size);
PVR_UNREFERENCED_PARAMETER(uiAlign);
PVR_UNREFERENCED_PARAMETER(bInitialise);
- PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(ui8InitValue);
PVR_UNREFERENCED_PARAMETER(phHandlePtr);
PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
return PVRSRV_OK;
}
static INLINE PVRSRV_ERROR
-PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFreeUnlocked(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
return PVRSRV_OK;
}
#endif /* PDUMP */
#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SPARSE_PREFIX "SPMR"
#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s"
#define PMR_MEMSPACE_FMTSPEC "%s"
#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s"
-#if defined(PDUMP)
-#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
- PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
-#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
- PDumpFree(hHandle)
-#else
-#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
- ((void)(*phHandlePtr=NULL))
-#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
- ((void)(0))
-#endif /* PDUMP */
-
PVRSRV_ERROR
PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode,
const IMG_CHAR *pszDevSpace,
allocation is to be done
@Input pszSymbolicAddress Symbolic name of the allocation
@Input phHandlePtr PDUMP handle to the allocation
+@Input uiPid PID of the process owning the allocation
+ (or PVR_SYS_ALLOC_PID if the allocation
+ belongs to the driver)
@Output hMemHandle Handle to the allocated memory
@Output psDevPhysAddr Device Physical address of allocated
page
const IMG_CHAR *pszSymbolicAddress,
IMG_HANDLE *phHandlePtr,
#endif
+ IMG_PID uiPid,
IMG_HANDLE hMemHandle,
IMG_DEV_PHYADDR *psDevPhysAddr);
PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
PVRSRV_DEVICE_NODE *psDevNode,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection,
PVRSRV_DEVICE_NODE *psDevNode,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
IMG_UINT32 ui32PDumpFlags,
PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags);
-/*
- * PhysmemNewRamBackedLockedPMR
- *
- * Same as function above but is additionally locking down the PMR.
- *
- * Get the physical memory and lock down the PMR directly, we do not want to
- * defer the actual allocation to mapping time.
- *
- * In general the concept of on-demand allocations is not useful for
- * allocations where we give the users the freedom to map and unmap memory at
- * will. The user is not expecting their memory contents to suddenly vanish
- * just because they unmapped the buffer.
- * Even if they would know and be ok with it, we do not want to check for
- * every page we unmap whether we have to unlock the underlying PMR.
-*/
-PVRSRV_ERROR
-PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_DEVMEM_SIZE_T uiSize,
- PMR_SIZE_T uiChunkSize,
- IMG_UINT32 ui32NumPhysChunks,
- IMG_UINT32 ui32NumVirtChunks,
- IMG_UINT32 *pui32MappingTable,
- IMG_UINT32 uiLog2PageSize,
- PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_UINT32 uiAnnotationLength,
- const IMG_CHAR *pszAnnotation,
- IMG_PID uiPid,
- PMR **ppsPMRPtr,
- IMG_UINT32 ui32PDumpFlags,
- PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags);
-
/*************************************************************************/ /*!
@Function PhysmemImportPMR
@Description Import PMR a previously exported PMR
PMR_LOG2ALIGN_T uiLog2Contig,
PMR **ppsPMR);
-/*************************************************************************/ /*!
-@Function PVRSRVGetMaxPhysHeapCountKM
-@Description Get the user accessible physical heap count
-@Output puiPhysHeapCount user accessible physical heap count
-@Return PVRSRV_OK if successful
-*/ /**************************************************************************/
-PVRSRV_ERROR
-PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 *puiPhysHeapCount);
-
/*************************************************************************/ /*!
@Function PVRSRVGetDefaultPhysicalHeapKM
@Description For the specified device, get the physical heap used for
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- PVRSRV_PHYS_HEAP *peHeap);
-
-/*************************************************************************/ /*!
-@Function PVRSRVGetHeapPhysMemUsageKM
-@Description Get the memory usage statistics for all user accessible
- physical heaps
-@Input ui32PhysHeapCount Total user accessible physical heaps
-@Output apPhysHeapMemStats Buffer to hold the memory statistics
-@Return PVRSRV_OK if successful
-*/ /**************************************************************************/
-PVRSRV_ERROR
-PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS *apPhysHeapMemStats);
-
-/*************************************************************************/ /*!
-@Function PVRSRVGetHeapPhysMemUsagePkdKM
-@Description Get the memory usage statistics for all user accessible
- physical heaps
-@Input ui32PhysHeapCount Total user accessible physical heaps
-@Output apPhysHeapMemStats Buffer to hold the memory statistics
-@Return PVRSRV_OK if successful
-*/ /**************************************************************************/
-PVRSRV_ERROR
-PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats);
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_PHYS_HEAP *peHeap);
/*************************************************************************/ /*!
@Function PVRSRVPhysHeapGetMemInfoKM
*/ /**************************************************************************/
PVRSRV_ERROR
PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS *paPhysHeapMemStats);
-
-/*************************************************************************/ /*!
-@Function PVRSRVPhysHeapGetMemInfoPkdKM
-@Description Get the memory usage statistics for a given physical heap ID
-@Input ui32PhysHeapCount Physical Heap count
-@Input paePhysHeapID Array of Physical Heap ID's
-@Output paPhysHeapMemStats Buffer to hold the memory statistics
-@Return PVRSRV_OK if successful
-*/ /**************************************************************************/
-PVRSRV_ERROR
-PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection,
- PVRSRV_DEVICE_NODE *psDevNode,
- IMG_UINT32 ui32PhysHeapCount,
- PVRSRV_PHYS_HEAP *paePhysHeapID,
- PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats);
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32PhysHeapCount,
+ PVRSRV_PHYS_HEAP *paePhysHeapID,
+ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats);
#endif /* SRVSRV_PHYSMEM_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title Definitions of PMR Mapping History for OS managed memory
+@Codingstyle IMG
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PHYSMEM_CPUMAP_HISTORY_H_
+#define PHYSMEM_CPUMAP_HISTORY_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include <powervr/mem_types.h>
+
+/*!
+*******************************************************************************
+ @Function CPUMappingHistoryInit
+ @Description CPU Mapping history initialisation
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR CPUMappingHistoryInit(void);
+
+/*!
+*******************************************************************************
+ @Function CPUMappingHistoryDeInit
+ @Description CPU Mapping history De-initialisation
+ @Return None
+******************************************************************************/
+void CPUMappingHistoryDeInit(void);
+
+/*!
+*******************************************************************************
+ @Function InsertMappingRecord
+ @Description CPU Mapping history Map Record insertion.
+ This function given relevant mapping information, inserts a
+ mapping record to the history buffer.
+ @Input pszAnnotation - The annotation related to the allocation to be
+ mapped.
+ @Input uiPID - The PID of the process mapping the allocation.
+ @Input pvAddress - The CPU virtual address of the newly mapped
+ allocation.
+ @Input sCPUPhyAddr - The CPU Physical address of the newly mapped
+ allocation.
+ @Input ui32CPUCacheFlags - The CPU Caching flags associated with the
+ mapping.
+ @Input uiMapOffset - The offset into the PMR at which the mapping
+ resides.
+ @Input ui32PageCount - The number of pages mapped.
+ @Return None
+******************************************************************************/
+void InsertMappingRecord(const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPID,
+ IMG_CPU_VIRTADDR pvAddress,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_UINT32 ui32CPUCacheFlags,
+ size_t uiMapOffset,
+ IMG_UINT32 ui32PageCount);
+
+/*!
+*******************************************************************************
+ @Function InsertUnMappingRecord
+ @Description CPU Mapping history UnMap Record insertion.
+ This function given relevant mapping information, inserts an
+ un-mapping record to the history buffer.
+ @Input pvAddress - The CPU virtual address of the un-mapped
+ allocation.
+ @Input sCPUPhyAddr - The CPU Physical address of the un-mapped
+ allocation.
+ @Input ui32CPUCacheFlags - The CPU Caching flags associated with the
+ mapping.
+ @Input ui32PageCount - The number of pages un-mapped.
+ @Return None
+******************************************************************************/
+void InsertUnMappingRecord(IMG_CPU_VIRTADDR pvAddress,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 ui32PageCount);
+
+
+#endif /* PHYSMEM_CPUMAP_HISTORY_H_ */
#include "pmr.h"
-typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
- struct dma_buf_attachment *psAttachment);
+typedef void (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment);
PVRSRV_ERROR
PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
@Function PhysmemCreateHeapLMA
@Description Create and register new LMA heap with LMA specific details.
@Input psDevNode Pointer to device node struct.
+@Input uiPolicy Heap allocation policy flags
@Input psConfig Heap configuration.
@Input pszLabel Debug identifier label
@Output ppsPhysHeap Pointer to the created heap.
*/ /**************************************************************************/
PVRSRV_ERROR
PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode,
- PHYS_HEAP_CONFIG *psConfig,
- IMG_CHAR *pszLabel,
- PHYS_HEAP **ppsPhysHeap);
+ PHYS_HEAP_POLICY uiPolicy,
+ PHYS_HEAP_CONFIG *psConfig,
+ IMG_CHAR *pszLabel,
+ PHYS_HEAP **ppsPhysHeap);
/*
* PhysmemNewLocalRamBackedPMR
PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap,
CONNECTION_DATA *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
@Input psConnection the connection to the originator process
@Input uiSize the size of the allocation
(must be a multiple of page size)
-@Input uiChunkSize when sparse allocations are requested,
- this is the allocated chunk size.
- For regular allocations, this will be
- the same as uiSize.
- (must be a multiple of page size)
@Input ui32NumPhysChunks when sparse allocations are requested,
this is the number of physical chunks
to be allocated.
PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap,
CONNECTION_DATA *psConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
#define PMR_MAX_TRANSLATION_STACK_ALLOC (32)
-/* Maximum number of pages a PMR can have is 1G of memory */
-#define PMR_MAX_SUPPORTED_PAGE_COUNT (262144)
+/* Maximum size PMR can have is 8G of memory */
+#define PMR_MAX_SUPPORTED_SIZE (0x200000000ULL)
+/* Max number of pages in a PMR at 4k page size */
+#define PMR_MAX_SUPPORTED_4K_PAGE_COUNT (PMR_MAX_SUPPORTED_SIZE >> 12ULL)
typedef IMG_UINT64 PMR_BASE_T;
typedef IMG_UINT64 PMR_SIZE_T;
typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+/*
+ * PMRValidateSize
+ *
+ * Given a size value, check the value against the max supported
+ * PMR size of 1GB. Return IMG_FALSE if size exceeds max, IMG_TRUE
+ * otherwise.
+ */
+static inline IMG_BOOL PMRValidateSize(IMG_UINT64 uiSize)
+{
+ return (uiSize > PMR_MAX_SUPPORTED_SIZE) ? IMG_FALSE : IMG_TRUE;
+}
+
/*
* PMRCreatePMR
*
PVRSRV_ERROR
PMRCreatePMR(PHYS_HEAP *psPhysHeap,
PMR_SIZE_T uiLogicalSize,
- PMR_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
PVRSRV_ERROR
PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
-
-/*************************************************************************/ /*!
-@Function PMRUnpinPMR
-@Description This is the counterpart to PMRPinPMR(). It is meant to be
- called before repinning an allocation.
-
- For a detailed description see client API documentation.
-
-@Input psPMR The physical memory to unpin.
-
-@Input bDevMapped A flag that indicates if this PMR has been
- mapped to device virtual space.
- Needed to check if this PMR is allowed to be
- unpinned or not.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
- registered to be reclaimed. Error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
-
-/*************************************************************************/ /*!
-@Function PMRPinPMR
-@Description This is the counterpart to PMRUnpinPMR(). It is meant to be
- called after unpinning an allocation.
-
- For a detailed description see client API documentation.
-
-@Input psPMR The physical memory to pin.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
- was successfully restored.
-
- PVRSRV_ERROR_PMR_NEW_MEMORY when the content
- could not be restored and new physical memory
- was allocated.
-
- A different error otherwise.
-*/ /**************************************************************************/
-PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
-
/*
* PhysmemPMRExport()
*
address space. The caller does not need to call
PMRLockSysPhysAddresses before calling this function.
-@Input psPMR PMR to map.
+@Input psPMR PMR to map.
-@Input pOSMMapData OS specific data needed to create a mapping.
+@Input pOSMMapData OS specific data needed to create a mapping.
+
+@Input uiCpuAccessFlags Flags to indicate if the mapping request
+ requires read, write or both access.
@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise.
*/ /**************************************************************************/
PVRSRV_ERROR
-PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+PMRMMapPMR(PMR *psPMR,
+ PMR_MMAP_DATA pOSMMapData,
+ PVRSRV_MEMALLOCFLAGS_T uiCpuAccessFlags);
/*
* PMRRefPMR()
PVRSRV_ERROR
PMRUnrefPMR(PMR *psPMR);
+/*
+ * PMRRefPMR2()
+ *
+ * Take a reference on the passed in PMR.
+ *
+ * This function does not perform address locking as opposed to PMRRefPMR().
+ */
+void
+PMRRefPMR2(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR2()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor").
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+void
+PMRUnrefPMR2(PMR *psPMR);
+
/*
* PMRUnrefUnlockPMR()
*
PPVRSRV_DEVICE_NODE
PMR_DeviceNode(const PMR *psPMR);
-/*
- * PMRIsPMRLive()
- *
- * This function returns true if the PMR is in use and false otherwise.
- * This function is not thread safe and hence the caller needs to ensure the
- * thread safety by explicitly taking PMR or through other means.
- */
-IMG_BOOL PMRIsPMRLive(PMR *psPMR);
-
/*
* PMR_Flags()
*
IMG_BOOL
PMR_IsSparse(const PMR *psPMR);
-IMG_BOOL
-PMR_IsUnpinned(const PMR *psPMR);
-
void
PMR_LogicalSize(const PMR *psPMR,
IMG_DEVMEM_SIZE_T *puiLogicalSize);
IMG_UINT32
PMR_GetLog2Contiguity(const PMR *psPMR);
+/*
+ * PMRGetMaxChunkCount
+ *
+ * Given a PMR, calculate the maximum number of chunks supported by
+ * the PMR from the contiguity and return it.
+ */
+IMG_UINT32 PMRGetMaxChunkCount(PMR *psPMR);
+
const IMG_CHAR *
PMR_GetAnnotation(const PMR *psPMR);
size_t uiBufSz,
size_t *puiNumBytes);
-/*************************************************************************/ /*!
-@Brief Callback function type PFN_UNPIN_MEM_FN
-
-@Description Called to unpin an allocation.
- Once unpinned, the pages backing the allocation may be
- re-used by the Operating System for another purpose.
- When the pages are required again, they may be re-pinned
- (by calling PFN_PIN_MEM_FN). The driver will try to return
- same pages as before. The caller will be told if the
- content of these returned pages has been modified or if
- the pages returned are not the original pages.
-
- Implementation of this callback is optional.
-
-@Input pvPriv Private data (which was generated by the
- PMR factory when PMR was created)
-
-@Return PVRSRV_OK if the unpin was successful, an error code
- otherwise.
-*/ /**************************************************************************/
-typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv);
-
-/*************************************************************************/ /*!
-@Brief Callback function type PFN_PIN_MEM_FN
-
-@Description Called to pin a previously unpinned allocation.
- The driver will try to return same pages as were previously
- assigned to the allocation. The caller will be told if the
- content of these returned pages has been modified or if
- the pages returned are not the original pages.
-
- Implementation of this callback is optional.
-
-@Input pvPriv Private data (which was generated by the
- PMR factory when PMR was created)
-
-@Input psMappingTable Mapping table, which describes how
- virtual 'chunks' are to be mapped to
- physical 'chunks' for the allocation.
-
-@Return PVRSRV_OK if the original pages were returned unmodified.
- PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
- or different pages were returned.
- Another PVRSRV_ERROR code on failure.
-*/ /**************************************************************************/
-typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv,
- PMR_MAPPING_TABLE *psMappingTable);
-
/*************************************************************************/ /*!
@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN
@Input pvPriv Private data (which was generated by the
PMR factory when PMR was created)
-@Return PVRSRV_OK if the PMR destruction was successful, an error
- code otherwise.
- Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
- error returned from physmem_dmabuf.c layer and on this
- error, destroying of the PMR is aborted without disturbing
- the PMR state.
+@Return None
*/ /**************************************************************************/
-typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+typedef void (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
/*************************************************************************/ /*!
@Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN
/*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */
PFN_WRITE_BYTES_FN pfnWriteBytes;
- /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */
- PFN_UNPIN_MEM_FN pfnUnpinMem;
- /*! Callback function pointer, see ::PFN_PIN_MEM_FN */
- PFN_PIN_MEM_FN pfnPinMem;
-
/*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */
PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
/*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */
#include "pvrsrv_error.h"
#include "servicesext.h"
#include "opaque_types.h"
+#include "di_common.h"
/*!
*****************************************************************************
PVRSRV_DEV_POWER_STATE eCurrentPowerState,
PVRSRV_POWER_FLAGS ePwrFlags);
+const char *PVRSRVSysPowerStateToString(PVRSRV_SYS_POWER_STATE eState);
+const char *PVRSRVDevPowerStateToString(PVRSRV_DEV_POWER_STATE eState);
+
PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode);
void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode);
PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
PVRSRV_DEV_POWER_STATE eNewPowerState);
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetDeviceCurrentPowerState
+
+ @Description Set the current device power state to eNewPowerState
+
+ @Input psPowerDevice : Power device
+ @Input eNewPowerState : New power state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceCurrentPowerState(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState);
+
/*!
******************************************************************************
PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode,
IMG_UINT32 ui32NewValue);
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+void PVRSRVSetFirmwareStartTime(PVRSRV_POWER_DEV *psPowerDevice, IMG_UINT32 ui32TimeStamp);
+
+void PVRSRVSetFirmwareHandshakeIdleTime(PVRSRV_POWER_DEV *psPowerDevice, IMG_UINT64 ui64Duration);
+
+int PVRSRVPowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData);
+#endif
#endif /* POWER_H */
#include "pvrsrv_error.h"
#include "allocmem.h"
#include "cache_ops.h"
+#include "device.h"
+#include "connection_server.h"
/*
* The publishing of Process Stats is controlled by the
void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+PVRSRV_ERROR PVRSRVStatsDeviceConnect(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void PVRSRVStatsDeviceDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode);
+
#define MAX_POWER_STAT_ENTRIES 51
/*
void *pvCpuVAddr,
IMG_CPU_PHYADDR sCpuPAddr,
size_t uiBytes,
- void *pvPrivateData,
IMG_PID uiPid
DEBUG_MEMSTATS_PARAMS);
void
PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
-void
-PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
- IMG_PID pidOwner);
-
PVRSRV_ERROR
-PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
- IMG_PID pidOwner);
+PVRSRVStatsUpdateOOMStat(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OOMStatType,
+ IMG_PID pidOwner);
-void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+void PVRSRVStatsUpdateRenderContextStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32TotalNumPartialRenders,
IMG_UINT32 ui32TotalNumOutOfMemory,
IMG_UINT32 ui32TotalTAStores,
IMG_UINT32 ui32Total3DStores,
IMG_UINT32 ui32TotalTDMStores,
IMG_PID owner);
-void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+void PVRSRVStatsUpdateZSBufferStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32NumReqByApp,
IMG_UINT32 ui32NumReqByFW,
IMG_PID owner);
-void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+void PVRSRVStatsUpdateFreelistStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32NumGrowReqByApp,
IMG_UINT32 ui32NumGrowReqByFW,
IMG_UINT32 ui32InitFLPages,
IMG_UINT32 ui32NumHighPages,
IMG_PID ownerPid);
#endif
-#if defined(PVRSRV_ENABLE_PROCESS_STATS)
-/* Update pre/post power transition timing statistics */
-void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
- IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
- IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
-
-void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer);
-void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
-#else
-/* Update pre/post power transition timing statistics */
-static inline
-void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
- IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
- IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {}
-static inline
-void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) {}
-
-static inline
-void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
-#endif
-
-void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
-
-void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
-
/* Functions used for calculating the memory usage statistics of a process */
-PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize,
- IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats);
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid,
+ IMG_UINT32 ui32ArrSize,
+ IMG_BOOL bAllProcessStats,
+ IMG_UINT64 *pui64MemoryStats);
typedef struct {
IMG_UINT32 ui32Pid;
- IMG_UINT32 ui32KernelMemUsage;
- IMG_UINT32 ui32GraphicsMemUsage;
+ IMG_UINT64 ui64KernelMemUsage;
+ IMG_UINT64 ui64GraphicsMemUsage;
} PVRSRV_PER_PROCESS_MEM_USAGE;
-PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem,
- IMG_UINT32 *pui32NumberOfLivePids,
- PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData);
+PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT64 *pui64TotalMem,
+ IMG_UINT32 *pui32NumberOfLivePids,
+ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData);
#endif /* PROCESS_STATS_H */
#include "img_types.h"
-typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq);
-typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt);
+typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_HANDLE hSysData, IMG_UINT32 ui32Freq);
+typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_HANDLE hSysData, IMG_UINT32 ui32Volt);
typedef struct _IMG_OPP_
{
IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */
POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */
- IMG_BOOL abVmOnline[RGX_NUM_OS_SUPPORTED];
IMG_BOOL bUnload; /*!< Driver unload is in progress */
******************************************************************************/
PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
-#define PVRSRV_KM_ERRORS (PVRSRVGetPVRSRVData()->ui32DPFErrorCount)
+#define PVRSRV_KM_ERRORS ( PVRSRVGetPVRSRVData() ? PVRSRVGetPVRSRVData()->ui32DPFErrorCount : IMG_UINT32_MAX)
#define PVRSRV_ERROR_LIMIT_REACHED (PVRSRV_KM_ERRORS == IMG_UINT32_MAX)
#define PVRSRV_REPORT_ERROR() do { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } while (0)
@Function LMA_HeapIteratorCreate
@Description
- Creates iterator for traversing physical heap requested by ui32Flags. The
+ Creates iterator for traversing physical heap requested by ePhysHeap. The
iterator will go through all of the segments (a segment is physically
contiguous) of the physical heap and return their CPU physical address and
size.
@Input psDevNode: Pointer to device node struct.
- @Input ui32Flags: Find heap that matches flags.
+ @Input ePhysHeap: Find the matching heap.
@Output ppsIter: Pointer to the iterator object.
@Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise.
******************************************************************************/
PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode,
- PHYS_HEAP_USAGE_FLAGS ui32Flags,
+ PVRSRV_PHYS_HEAP ePhysHeap,
PHYS_HEAP_ITERATOR **ppsIter);
/*!
#if defined(SUPPORT_GPUVIRT_VALIDATION)
#if defined(EMULATOR)
- void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
- void SetTrustedDeviceAceEnabled(void);
+ void SetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState);
+ void SetTrustedDeviceAceEnabled(IMG_HANDLE hSysData);
#endif
#endif
******************************************************************************/
PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void);
-/*!
-******************************************************************************
- @Function : PVRSRVPhysMemHeapsInit
-
- @Description : Registers and acquires physical memory heaps
-
- @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
- error code
-******************************************************************************/
-PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig);
-
-/*!
-******************************************************************************
- @Function : PVRSRVPhysMemHeapsDeinit
-
- @Description : Releases and unregisters physical memory heaps
-
- @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
- error code
-******************************************************************************/
-void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode);
-
/*************************************************************************/ /*!
-@Function FindPhysHeapConfig
+@Function PVRSRVFindPhysHeapConfig
@Description Find Phys Heap Config from Device Config.
@Input psDevConfig Pointer to device config.
@Input ui32Flags Find heap that matches flags.
@Return PHYS_HEAP_CONFIG* Return a config, or NULL if not found.
*/ /**************************************************************************/
-PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
- PHYS_HEAP_USAGE_FLAGS ui32Flags);
+PHYS_HEAP_CONFIG* PVRSRVFindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PHYS_HEAP_USAGE_FLAGS ui32Flags);
/*************************************************************************/ /*!
@Function PVRSRVGetDeviceInstance
PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance);
/*************************************************************************/ /*!
-@Function PVRSRVGetDeviceInstanceByOSId
+@Function PVRSRVGetDeviceInstanceByKernelDevID
@Description Return the specified device instance by OS Id.
@Input i32OSInstance OS device Id to find
@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found.
*/ /**************************************************************************/
-PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance);
+PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByKernelDevID(IMG_INT32 i32OSInstance);
/*************************************************************************/ /*!
@Function PVRSRVDefaultDomainPower
#define PVRSRV_DEVICE_H
#include "img_types.h"
-#include "physheap.h"
+#include "physheap_config.h"
#include "pvrsrv_error.h"
#include "pvrsrv_memalloc_physheap.h"
#include "pvrsrv_firmware_boot.h"
#include "rgx_fwif_km.h"
#include "servicesext.h"
#include "cache_ops.h"
+#include "opaque_types.h"
#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
#include "pvr_dvfs.h"
PVRSRV_SYS_POWER_STATE_OFF if the domain is powered off
*/ /**************************************************************************/
typedef PVRSRV_SYS_POWER_STATE
-(*PFN_SYS_GET_POWER)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+(*PFN_SYS_GET_POWER)(PPVRSRV_DEVICE_NODE psDevNode);
typedef void
(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
#if defined(SUPPORT_GPUVIRT_VALIDATION)
-typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_HANDLE hSysData,
+ IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_
PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead;
#endif
- /*!
- *! Callback to handle memory budgeting. Can be used to reject allocations
- *! over a certain size (optional).
- */
- PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
-
/*!
*! Callback to perform host CPU cache maintenance. Might be needed for
*! architectures which allow extensions such as RISC-V (optional).
/*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
PFN_TD_RGXSTART pfnTDRGXStart;
PFN_TD_RGXSTOP pfnTDRGXStop;
+
+#if defined(PVR_ANDROID_HAS_DMA_HEAP_FIND)
+ /*! Name of DMA heap to allocate secure memory from. Used with dma_heap_find. */
+ IMG_CHAR *pszSecureDMAHeapName;
+#endif
#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
/*! Function that does device feature specific system layer initialisation */
#include "pmr.h"
#include "pvrsrv_error.h"
#include "physheap.h"
+#include "connection_server.h"
+#include "device.h"
PVRSRV_ERROR RIInitKM(void);
void RIDeInitKM(void);
PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
IMG_UINT32 ui32TextBSize,
- const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN],
+ const IMG_CHAR *psz8TextB,
IMG_UINT64 uiOffset,
IMG_UINT64 uiSize,
IMG_BOOL bIsImport,
IMG_BOOL bIsSuballoc,
RI_HANDLE *phRIHandle);
-PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+PVRSRV_ERROR RIWriteProcListEntryKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32TextBSize,
const IMG_CHAR *psz8TextB,
IMG_UINT64 ui64Size,
IMG_UINT64 ui64DevVAddr,
PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid,
IMG_UINT32 ui32ArrSize,
IMG_BOOL bAllProcessStats,
- IMG_UINT32 *ui32MemoryStats);
+ IMG_UINT64 *pui64MemoryStats);
static INLINE
PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection,
@Description Creates and initialises a common layer Services device node
for an OS native device. First stage device discovery.
@Input pvOSDevice OS native device
-@Input i32OsDeviceID A unique identifier which helps recognise this
+@Input i32KernelDeviceID A unique identifier which helps recognise this
Device in the UM space provided by the OS.
@Output ppsDeviceNode Points to the new device node on success
@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
*/ /**************************************************************************/
PVRSRV_ERROR
-PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32OsDeviceID,
+PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32KernelDeviceID,
struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
/*************************************************************************/ /*!
IMG_EXPORT
PVRSRV_ERROR DevmemIntAllocHostMemKM(IMG_DEVMEM_SIZE_T ui32Size,
PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_UINT32 ui32LableLength,
+ IMG_UINT32 ui32LabelLength,
const IMG_CHAR *pszAllocLabel,
PMR **ppsPMR);
PVRSRV_ERROR DevmemIntFreeHostMemKM(PMR *psPMR);
IMG_EXPORT
-PVRSRV_ERROR PowerTestIoctlKM(IMG_UINT32 uiCmd,
- IMG_UINT32 uiIn1,
- IMG_UINT32 uiIn2,
- IMG_UINT32 *puiOut1,
- IMG_UINT32 *puiOut2);
+PVRSRV_ERROR PowerTestIoctlKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiCmd,
+ IMG_UINT32 uiIn1,
+ IMG_UINT32 uiIn2,
+ IMG_UINT32 *puiOut1,
+ IMG_UINT32 *puiOut2);
PVRSRV_ERROR TestIOCTLSyncFbFenceSignalPVR(CONNECTION_DATA * psConnection,
PVRSRV_DEVICE_NODE *psDevNode,
typedef enum _VMM_CONF_PARAM_
{
- VMM_CONF_PRIO_OSID0 = 0,
- VMM_CONF_PRIO_OSID1 = 1,
- VMM_CONF_PRIO_OSID2 = 2,
- VMM_CONF_PRIO_OSID3 = 3,
- VMM_CONF_PRIO_OSID4 = 4,
- VMM_CONF_PRIO_OSID5 = 5,
- VMM_CONF_PRIO_OSID6 = 6,
- VMM_CONF_PRIO_OSID7 = 7,
+ VMM_CONF_PRIO_DRV0 = 0,
+ VMM_CONF_PRIO_DRV1 = 1,
+ VMM_CONF_PRIO_DRV2 = 2,
+ VMM_CONF_PRIO_DRV3 = 3,
+ VMM_CONF_PRIO_DRV4 = 4,
+ VMM_CONF_PRIO_DRV5 = 5,
+ VMM_CONF_PRIO_DRV6 = 6,
+ VMM_CONF_PRIO_DRV7 = 7,
VMM_CONF_HCS_DEADLINE = 8
} VMM_CONF_PARAM;
by the VM manager before forwarding request to host.
If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
*/
- PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID,
- IMG_UINT32 ui32DevID,
- IMG_UINT64 ui64Size,
+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT64 ui64Size,
IMG_UINT64 ui64PAddr);
- PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID,
- IMG_UINT32 ui32DevID);
+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(void);
} sClientFuncTab;
struct {
/*
Corresponding server side entries to handle guest PVZ calls
NOTE:
- - Additional PVZ function ui32OSID parameter
- - OSID determination is responsibility of VM manager
- - Actual OSID value must be supplied by VM manager
+ - Additional PVZ function ui32DriverID parameter
+ - Driver ID determination is responsibility of VM manager
+ - Actual Driver ID value must be supplied by VM manager
- This can be done either in client/VMM/host side
- Must be done before host pvz function(s) are called
- - Host pvz function validates incoming OSID values
+ - Host pvz function validates incoming Driver ID values
*/
- PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID,
IMG_UINT64 ui64Size,
IMG_UINT64 ui64PAddr);
- PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID);
} sServerFuncTab;
information to the host; these events may in turn be forwarded to
the firmware
*/
- PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID);
+ PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID);
- PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID);
+ PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID);
- PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+ PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType,
+ IMG_UINT32 ui32ParamValue,
+ IMG_UINT32 ui32DevID);
} sVmmFuncTab;
} VMM_PVZ_CONNECTION;
#include "pvrsrv.h"
#include "img_types.h"
#include "pvrsrv_error.h"
-#include "vmm_pvz_common.h"
#include "vmm_impl.h"
/*!
#include "vmm_impl.h"
#include "img_types.h"
#include "pvrsrv_error.h"
-#include "vmm_pvz_common.h"
/*!
*******************************************************************************
@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
******************************************************************************/
PVRSRV_ERROR
-PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID,
IMG_UINT64 ui64Size,
IMG_UINT64 ui64PAddr);
@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
******************************************************************************/
PVRSRV_ERROR
-PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
- IMG_UINT32 ui32FuncID,
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32DriverID,
IMG_UINT32 ui32DevID);
/*!
@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
******************************************************************************/
PVRSRV_ERROR
-PvzServerOnVmOnline(IMG_UINT32 ui32OSID);
+PvzServerOnVmOnline(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID);
/*!
*******************************************************************************
the GPU and it is safe to remove the memory for such VM.
@Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason
the FW is taking too long to clean-up the resources of the
- OSID. Otherwise, a PVRSRV_ERROR code.
+ DriverID. Otherwise, a PVRSRV_ERROR code.
******************************************************************************/
PVRSRV_ERROR
-PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
+PvzServerOnVmOffline(IMG_UINT32 ui32DriverID,
+ IMG_UINT32 ui32DevID);
/*!
*******************************************************************************
******************************************************************************/
PVRSRV_ERROR
PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
- IMG_UINT32 ui32ParamValue);
+ IMG_UINT32 ui32ParamValue,
+ IMG_UINT32 ui32DevID);
#endif /* VMM_PVZ_SERVER_H */
#include "vmm_impl.h"
-bool IsVmOnline(IMG_UINT32 ui32OSID);
+bool IsVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID);
-PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid);
+PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID);
-PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid);
+PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32DriverID, IMG_UINT32 ui32DevID);
-PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+ IMG_UINT32 ui32ParamValue,
+ IMG_UINT32 ui32DevID);
#endif /* VZ_VM_H */
be a hyper-call or cross-VM call
@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
******************************************************************************/
-PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+PVRSRV_ERROR PvzConnectionInit(void);
void PvzConnectionDeInit(void);
/*!
#include "client_mm_bridge.h"
#include "client_cache_bridge.h"
#include "services_km.h"
-#include "pvrsrv_memallocflags_internal.h"
#if defined(PDUMP)
#if defined(__KERNEL__)
IMG_DEV_PHYADDR sDevAddr;
IMG_BOOL bValid;
- PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN];
+ PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[FIRST_PHYSHEAP_MAPPED_TO_FW_MAIN_DEVMEM];
IMG_DEV_PHYADDR sHeapAddr;
eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr);
AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
IMG_UINT32 uiLog2Quantum,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
uiOutFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
eError = BridgePhysmemNewRamBackedPMR(GetBridgeHandle(hDevConnection),
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
RA_LENGTH_T uiSize,
RA_FLAGS_T _flags,
+ RA_LENGTH_T uBaseAlignment,
const IMG_CHAR *pszAnnotation,
/* returned data */
RA_BASE_T *puiBase,
PMR (whether or not such PMR is backed by physical memory) */
DEVMEM_HEAP *psHeap;
DEVMEM_IMPORT *psImport;
- IMG_DEVMEM_ALIGN_T uiAlign;
PVRSRV_ERROR eError;
IMG_UINT32 ui32MappingTable = 0;
PVRSRV_MEMALLOCFLAGS_T uiFlags = (PVRSRV_MEMALLOCFLAGS_T) _flags;
/* Per-arena private handle is, for us, the heap */
psHeap = hArena;
- /* align to the l.s.b. of the size... e.g. 96kiB aligned to
- 32kiB. NB: There is an argument to say that the RA should never
- ask us for Non-power-of-2 size anyway, but I don't want to make
- that restriction arbitrarily now */
- uiAlign = uiSize & ~(uiSize-1);
-
- /* Technically this is only required for guest drivers due to
- fw heaps being pre-allocated and pre-mapped resulting in
- a 1:1 (i.e. virtual : physical) offset correlation but we
- force this behaviour for all drivers to maintain consistency
- (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
- if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum))
- {
- uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum);
- }
-
/* The RA should not have invoked us with a size that is not a
multiple of the quantum anyway */
PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
psHeap->uiLog2Quantum,
uiSize,
- uiSize,
1,
1,
&ui32MappingTable,
- uiAlign,
+ uBaseAlignment,
uiFlags,
IMG_FALSE,
"PMR sub-allocated",
uiLog2ImportAlignment,
aszHeapName,
uiHeapBlueprintID,
+ uiHeapIndex,
&ppsHeapArray[uiHeapIndex]);
PVR_GOTO_IF_ERROR(eError, e1);
context (specifically, for handling mapping to device MMU) */
IMG_HANDLE hDevMemServerContext;
IMG_HANDLE hPrivData;
- IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+ IMG_BOOL bHeapCfgFWId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW);
PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0);
/* Create (server-side) Device Memory context */
eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection),
- bHeapCfgMetaId,
+ bHeapCfgFWId,
&hDevMemServerContext,
&hPrivData,
&psCtx->ui32CPUCacheLineSize);
IMG_UINT32 ui32Log2ImportAlignment,
const IMG_CHAR *pszName,
DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ IMG_UINT32 uiHeapIndex,
DEVMEM_HEAP **ppsHeapPtr)
{
PVRSRV_ERROR eError = PVRSRV_OK;
IMG_CHAR *pszStr;
IMG_UINT32 ui32pszStrSize;
- if (ppsHeapPtr == NULL ||
- uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY)
- {
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
- }
+ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsHeapPtr, "ppsHeapPtr");
+
+ /* Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY.
+ * Granularity has been chosen to support the max possible practically used OS page size. */
+ PVR_LOG_RETURN_IF_INVALID_PARAM((uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) == 0, "uiReservedRegionLength");
ui32PolicyVMRA = RA_POLICY_DEFAULT;
psHeap->pszSubAllocRAName = pszStr;
#if defined(__KERNEL__)
- if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW)
{
void *pvAppHintState = NULL;
- IMG_UINT32 ui32FirmwarePolicydefault = 0, ui32FirmwarePolicy=0;
+ IMG_UINT32 ui32FirmwarePolicydefault = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY;
+ IMG_UINT32 ui32FirmwarePolicy = PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY;
OSCreateKMAppHintState(&pvAppHintState);
OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy,
&ui32FirmwarePolicydefault, &ui32FirmwarePolicy);
ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy;
OSFreeKMAppHintState(pvAppHintState);
+
+ /* Flag the change from default setting */
+ if (ui32FirmwarePolicy != ui32FirmwarePolicydefault)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: %s: DevMemFWHeapPolicy set to %u, default %u",
+ __func__, pszStr,
+ ui32FirmwarePolicy, ui32FirmwarePolicydefault));
+ }
}
#endif
* ensuring the MALLOC is present for every allocation made within the
* pdump capture range
*/
- if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORFW)
{
ui32Policy |= RA_POLICY_NO_SPLIT;
}
/* Create server-side counterpart of Device Memory heap */
eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection),
psCtx->hDevMemServerContext,
+ uiHeapBlueprintID,
+ uiHeapIndex,
sBaseAddress,
- uiLength,
ui32Log2Quantum,
&hDevMemServerHeap);
PVR_GOTO_IF_ERROR(eError, e6);
{
uiAlign = 1ULL << uiLog2Quantum;
}
- uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+ uiSize = PVR_ALIGN(uiSize, uiAlign);
*puiSize = uiSize;
*puiAlign = uiAlign;
IMG_UINT32 ui32CacheLineSize = 0;
DEVMEM_PROPERTIES_T uiProperties;
- if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+/* On nohw PDump builds, we try to minimise the amount
+ * of uninitialised data in captures.
+ */
+#if defined(PDUMP) && defined(NO_HARDWARE)
+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC;
+ bPoisonOnAlloc = IMG_TRUE;
+ }
+#endif
+
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC)
{
/* Deferred Allocation not supported on SubAllocs*/
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams);
DEVMEM_IMPORT *psImport;
IMG_UINT32 ui32MappingTable = 0;
+/* On nohw PDump builds, we try to minimise the amount
+ * of uninitialised data in captures.
+ */
+#if defined(PDUMP) && defined(NO_HARDWARE)
+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC;
+ }
+#endif
+
eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
&uiSize,
&uiAlign);
eError = AllocateDeviceMemory(hDevConnection,
uiLog2HeapPageSize,
uiSize,
- uiSize,
1,
1,
&ui32MappingTable,
IMG_INTERNAL PVRSRV_ERROR
DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
DEVMEM_MEMDESC *psMemDesc = NULL;
DEVMEM_IMPORT *psImport;
+/* On nohw PDump builds, we try to minimise the amount
+ * of uninitialised data in captures.
+ */
+#if defined(PDUMP) && defined(NO_HARDWARE)
+ if (PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) && !PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ uiFlags |= PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC;
+ }
+#endif
+
eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
&uiSize,
&uiAlign);
eError = AllocateDeviceMemory(hDevConnection,
uiLog2HeapPageSize,
uiSize,
- uiChunkSize,
ui32NumPhysChunks,
ui32NumVirtChunks,
pui32MappingTable,
/*****************************************************************************
* Common MemDesc functions *
*****************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
- DEVMEM_IMPORT *psImport = psMemDesc->psImport;
- DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);
-
- if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
- {
- eError = PVRSRV_ERROR_INVALID_REQUEST;
- PVR_DPF((PVR_DBG_ERROR,
- "%s: The passed allocation is not valid to unpin",
- __func__));
-
- goto e_exit;
- }
-
- /* Stop if the allocation might have suballocations. */
- if (!(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
- {
- eError = PVRSRV_ERROR_INVALID_PARAMS;
- PVR_DPF((PVR_DBG_ERROR,
- "%s: The passed allocation is not valid to unpin because "
- "there might be suballocations on it. Make sure you allocate a page multiple "
- "of the heap when using PVRSRVAllocDeviceMem()",
- __func__));
-
- goto e_exit;
- }
-
- /* Stop if the Import is still mapped to CPU */
- if (psImport->sCPUImport.ui32RefCount)
- {
- eError = PVRSRV_ERROR_STILL_MAPPED;
- PVR_DPF((PVR_DBG_ERROR,
- "%s: There are still %u references on the CPU mapping. "
- "Please remove all CPU mappings before unpinning.",
- __func__,
- psImport->sCPUImport.ui32RefCount));
-
- goto e_exit;
- }
-
- /* Only unpin if it is not already unpinned
- * Return PVRSRV_OK */
- if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
- {
- goto e_exit;
- }
-
- /* Unpin it and invalidate mapping */
- if (psImport->sDeviceImport.bMapped)
- {
- eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection),
- psImport->sDeviceImport.hMapping,
- psImport->hPMR);
- }
- else
- {
- /* Or just unpin it */
- eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection),
- psImport->hPMR);
- }
-
- /* Update flags and RI when call was successful */
- if (eError == PVRSRV_OK)
- {
- OSLockAcquire(psImport->hLock);
- psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
- OSLockRelease(psImport->hLock);
- }
- else
- {
- /* Or just show what went wrong */
- PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
- __func__,
- eError));
- }
-
-e_exit:
- return eError;
-}
-
-
-IMG_INTERNAL PVRSRV_ERROR
-DevmemPin(DEVMEM_MEMDESC *psMemDesc)
-{
- PVRSRV_ERROR eError = PVRSRV_OK;
- DEVMEM_IMPORT *psImport = psMemDesc->psImport;
- DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport);
-
- if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE)
- {
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e_exit);
- }
-
- /* Only pin if it is unpinned */
- if ((uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
- {
- goto e_exit;
- }
-
- /* Pin it and make mapping valid */
- if (psImport->sDeviceImport.bMapped)
- {
- eError = BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection),
- psImport->sDeviceImport.hMapping,
- psImport->hPMR);
- }
- else
- {
- /* Or just pin it */
- eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection),
- psImport->hPMR);
- }
-
- if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY))
- {
- OSLockAcquire(psImport->hLock);
- psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
- OSLockRelease(psImport->hLock);
- }
- else
- {
- /* Or just show what went wrong */
- PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
- __func__,
- eError));
- }
-
-e_exit:
- return eError;
-}
-
IMG_INTERNAL PVRSRV_ERROR
DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize)
IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
- /* Do not try to map unpinned memory */
- if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
- {
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags);
- }
-
OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams);
}
/* Don't map memory for deferred allocations */
- if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC)
{
PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
bMap = IMG_FALSE;
OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
}
PVR_ASSERT(eError != PVRSRV_OK);
-failFlags:
+
return eError;
}
IMG_BOOL bDestroyed = IMG_FALSE;
DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
- /* Do not try to map unpinned memory */
- if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
- {
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags);
- }
-
OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams);
}
/* Don't map memory for deferred allocations */
- if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC)
{
PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
bMap = IMG_FALSE;
OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
}
PVR_ASSERT(eError != PVRSRV_OK);
-failFlags:
+
return eError;
}
{
PVRSRV_ERROR eError;
- /* Do not try to map unpinned memory */
- if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_UNPINNED)
- {
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failCheck);
- }
-
OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
__func__,
failRelease:
OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
PVR_ASSERT(eError != PVRSRV_OK);
-failCheck:
+
return eError;
}
psContext->hDevMemServerContext,
psFaultAddress);
}
-IMG_INTERNAL PVRSRV_ERROR
-DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate)
-{
- DEVMEM_IMPORT *psImport = psMemDesc->psImport;
- return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection),
- psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
- sDevVAddr,
- uiSize,
- bInvalidate);
-}
#if defined(RGX_FEATURE_FBCDC)
IMG_INTERNAL PVRSRV_ERROR
@Input psContext Memory context the process that would like to
be notified about.
-@Input ui32PID The PID of the calling process.
@Input bRegister If true, register. If false, de-register.
@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
error code
*/ /***************************************************************************/
IMG_INTERNAL PVRSRV_ERROR
RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
- IMG_UINT32 ui32PID,
IMG_BOOL bRegister)
{
PVRSRV_ERROR eError;
eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection),
psContext->hDevMemServerContext,
- ui32PID,
bRegister);
if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
{
if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
{
PVRSRV_ERROR eErr;
- eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
- PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT,
+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+ PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT,
OSGetCurrentProcessID());
- PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat");
}
#endif
goto failSVM;
return PVRSRV_ERROR_INVALID_PARAMS;
}
+ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_DEFER_PHYS_ALLOC) &&
+ (*puiFlags & PVRSRV_MEMALLOCFLAG_PHYS_ALLOC_NOW))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Defer Alloc and Alloc Now are mutually exclusive.",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
if (uiAlign & (uiAlign-1))
{
PVR_DPF((PVR_DBG_ERROR,
(eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL))
{
PVRSRV_ERROR eErr;
- eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
- PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM,
+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+ PVRSRV_DEVICE_STAT_TYPE_INVALID_VIRTMEM,
OSGetCurrentProcessID());
- PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat");
}
#endif
return eError;
/* Round the provided import alignment to the configured heap alignment */
uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
- uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+ uiAlign = PVR_ALIGN(psImport->uiAlign, uiAlign);
psDeviceImport = &psImport->sDeviceImport;
if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
{
PVRSRV_ERROR eErr;
- eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection),
- PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT,
+ eErr = BridgePVRSRVStatsUpdateOOMStat(GetBridgeHandle(psHeap->psCtx->hDevConnection),
+ PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT,
OSGetCurrentProcessID());
- PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats");
+ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVStatsUpdateOOMStat");
}
#endif
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc);
if (psHeap->bPremapped)
{
- /* no virtual address reservation and mapping are required for memory that's already mapped */
+ /* No virtual address reservation and mapping are required for
+ * memory that is already pre-mapped e.g. FW heaps in VZ configs */
psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
}
PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages <<
psMemDescPhys->uiLog2PageSize));
- eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge,
+ eError = BridgePMRPDumpLoadMem(GetBridgeHandle(psMemDescPhys->hConnection),
psMemDescPhys->hPMR,
uiOffset,
uiSize,
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/
-#if defined(__linux__)
+#if defined(__linux__) && defined(__KERNEL__)
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
);
}
-
-/*************************************************************************/ /*!
-*/ /**************************************************************************/
-static PVRSRV_ERROR
-_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampus,
- HTB_LOG_SFids SF, va_list args)
-{
-#if defined(__KERNEL__)
- IMG_UINT32 i;
- IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
-#if defined(__KLOCWORK__)
- IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1]; // Prevent KW False-positive
-#else
- IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
-#endif
-
- PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
- ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ?
- HTB_LOG_MAX_PARAMS : ui32NumArgs;
-
- /* unpack var args before sending over bridge */
- for (i=0; i<ui32NumArgs; i++)
- {
- aui32Args[i] = va_arg(args, IMG_UINT32);
- }
-
- return BridgeHTBLog(hSrvHandle, PID, TID, ui64TimeStampus, SF,
- ui32NumArgs, aui32Args);
-#else
- PVR_UNREFERENCED_PARAMETER(hSrvHandle);
- PVR_UNREFERENCED_PARAMETER(PID);
- PVR_UNREFERENCED_PARAMETER(TID);
- PVR_UNREFERENCED_PARAMETER(ui64TimeStampus);
- PVR_UNREFERENCED_PARAMETER(SF);
- PVR_UNREFERENCED_PARAMETER(args);
-
- PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
- return PVRSRV_ERROR_NOT_SUPPORTED;
-#endif
-}
-
-
-/*************************************************************************/ /*!
- @Function HTBLog
- @Description Record a Host Trace Buffer log event
- @Input PID The PID of the process the event is associated
- with. This is provided as an argument rather
- than querying internally so that events
- associated with a particular process, but
- performed by another can be logged correctly.
- @Input ui64TimeStampus The timestamp to be associated with this
- log event
- @Input SF The log event ID
- @Input ... Log parameters
- @Return PVRSRV_OK Success.
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns,
- IMG_UINT32 SF, ...)
-{
- PVRSRV_ERROR eError;
- va_list args;
- va_start(args, SF);
- eError =_HTBLog(hSrvHandle, PID, TID, ui64TimeStampns, SF, args);
- va_end(args);
- return eError;
-}
-
-
-/*************************************************************************/ /*!
- @Function HTBLogSimple
- @Description Record a Host Trace Buffer log event with implicit PID and
- Timestamp
- @Input SF The log event ID
- @Input ... Log parameters
- @Return PVRSRV_OK Success.
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
-{
- PVRSRV_ERROR eError;
- IMG_UINT64 ui64Timestamp;
- va_list args;
- va_start(args, SF);
- OSClockMonotonicns64(&ui64Timestamp);
- eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSGetCurrentThreadID(), ui64Timestamp,
- SF, args);
- va_end(args);
- return eError;
-}
-
/* EOF */
#if defined(__KERNEL__) && defined(__clang__)
#define DEVICE_MEMSETCPY_NON_VECTOR_KM
-#if !defined(BITS_PER_BYTE)
-#define BITS_PER_BYTE (8)
-#endif /* BITS_PER_BYTE */
-
/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines. */
#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8
typedef __uint128_t uint128_t;
#include "img_defs.h"
#include "pvr_debug.h"
#include "pvrsrv_error.h"
+#include "dllist.h"
#include "uniq_key_splay_tree.h"
#include "hash.h"
*/
#define MINIMUM_HASH_SIZE (64)
-
/* #define RA_VALIDATE */
#if defined(__KLOCWORK__)
/* arena name for diagnostics output */
IMG_CHAR name[RA_MAX_NAME_LENGTH];
- /* allocations within this arena are quantum sized */
+ /* Spans / Imports within this arena are at least quantum sized
+ * and are a multiple of the uQuantum. This also has the effect of
+ * aligning these Spans to the uQuantum.
+ */
RA_LENGTH_T uQuantum;
/* import interface, if provided */
POS_LOCK hLock;
/* Policies that govern the resource area */
- IMG_UINT32 ui32PolicyFlags;
+ RA_POLICY_T ui32PolicyFlags;
/* LockClass of this arena. This is used within lockdep to decide if a
* recursive call sequence with the same lock class is allowed or not.
IMG_BOOL bIncludeFreeSegments;
};
+static PVRSRV_ERROR _RA_FreeMultiUnlocked(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize);
+static PVRSRV_ERROR
+_RA_FreeMultiUnlockedSparse(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 *puiFreeIndices,
+ IMG_UINT32 *puiFreeCount);
+
/*************************************************************************/ /*!
@Function _RequestAllocFail
@Description Default callback allocator used if no callback is specified,
@Input _h - callback handle
@Input _uSize - requested allocation size
@Input _uflags - allocation flags
+@Input _uBaseAlignment - Alignment for the returned allocated base
@Input _pBase - receives allocated base
@Output _pActualSize - actual allocation size
@Input _pRef - user reference
_RequestAllocFail(RA_PERARENA_HANDLE _h,
RA_LENGTH_T _uSize,
RA_FLAGS_T _uFlags,
+ RA_LENGTH_T _uBaseAlignment,
const IMG_CHAR *_pszAnnotation,
RA_BASE_T *_pBase,
RA_LENGTH_T *_pActualSize,
PVR_UNREFERENCED_PARAMETER(_pActualSize);
PVR_UNREFERENCED_PARAMETER(_phPriv);
PVR_UNREFERENCED_PARAMETER(_uFlags);
+ PVR_UNREFERENCED_PARAMETER(_uBaseAlignment);
PVR_UNREFERENCED_PARAMETER(_pBase);
PVR_UNREFERENCED_PARAMETER(_pszAnnotation);
}
#endif
+static INLINE void _FreeTableLimitBoundsCheck(IMG_UINT32 *uiIndex)
+{
+ if (*uiIndex >= FREE_TABLE_LIMIT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Index exceeds FREE_TABLE_LIMIT (1TB), "
+ "Clamping Index to FREE_TABLE_LIMIT"));
+ *uiIndex = FREE_TABLE_LIMIT - 1;
+ }
+}
+
#if defined(RA_VALIDATE)
/*************************************************************************/ /*!
BT *pBTTemp = NULL;
uIndex = pvr_log2(pBT->uSize);
- PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+ _FreeTableLimitBoundsCheck(&uIndex);
+
PVR_ASSERT(!_IsInFreeList(pArena, pBT));
pBT->type = btt_free;
/* the flags item in the splay tree must have been created before-hand by
_InsertResource */
PVR_ASSERT(pArena->per_flags_buckets != NULL);
- PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
/* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */
- if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL)))
+ if (unlikely(pArena->per_flags_buckets == NULL))
{
return;
}
IMG_UINT32 uIndex;
uIndex = pvr_log2(pBT->uSize);
- PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
+ _FreeTableLimitBoundsCheck(&uIndex);
+
PVR_ASSERT(_IsInFreeList(pArena, pBT));
if (pBT->next_free != NULL)
/* the flags item in the splay tree must have already been created
(otherwise how could there be a segment with these flags */
PVR_ASSERT(pArena->per_flags_buckets != NULL);
- PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
/* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */
- if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL)))
+ if (unlikely(pArena->per_flags_buckets == NULL))
{
pBT->type = btt_live;
return;
for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
{
const RA_BASE_T aligned_base = (uAlignment > 1) ?
- (walker->base + uAlignment - 1) & ~(uAlignment - 1)
+ PVR_ALIGN(walker->base, uAlignment)
: walker->base;
if (walker->base + walker->uSize >= aligned_base + uSize)
return NULL;
}
+/*************************************************************************/ /*!
+ * @Function _FreeMultiBaseArray
+ *
+ * @Description Given an array (Could be complete or partial reference)
+ *              free the region given as the array and size. This function
+ *              should be used only when it is known that multiple Real
+ *              bases will be freed from the array.
+ *
+ * @Input pArena - The RA Arena to free the bases on.
+ * @Input aBaseArray - The Base array to free from
+ * @Input uiBaseArraySize - The Size of the base array to free.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_FreeMultiBaseArray(RA_ARENA *pArena,
+                    RA_BASE_ARRAY_T aBaseArray,
+                    RA_BASE_ARRAY_SIZE_T uiBaseArraySize)
+{
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < uiBaseArraySize; ui32Idx++)
+	{
+		BT *psBT;
+
+		if (!RA_BASE_IS_REAL(aBaseArray[ui32Idx]))
+		{
+#if defined(DEBUG)
+			/* Debug builds also sanitise Ghost/unused entries. */
+			aBaseArray[ui32Idx] = INVALID_BASE_ADDR;
+#endif
+			continue;
+		}
+
+		psBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &aBaseArray[ui32Idx]);
+		if (psBT == NULL)
+		{
+			/* Did we attempt to remove a ghost page?
+			 * Essentially the base was marked real but was actually a ghost.
+			 */
+			PVR_ASSERT(!"Attempt to free non-existing real base!");
+			return PVRSRV_ERROR_INVALID_REQUEST;
+		}
+
+		pArena->ui64FreeArenaSize += psBT->uSize;
+
+		PVR_ASSERT(psBT->base == aBaseArray[ui32Idx]);
+		_FreeBT(pArena, psBT);
+		aBaseArray[ui32Idx] = INVALID_BASE_ADDR;
+	}
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ * @Function _FreeSingleBaseArray
+ *
+ * @Description Given an array (Could be complete or partial reference)
+ *              free the region given as the array and size. This function
+ *              should be used only when it is known that a single Real
+ *              base will be freed from the array. All Bases will be
+ *              sanitised after the real has been freed.
+ *
+ * @Input pArena - The RA Arena to free the bases on.
+ * @Input aBaseArray - The Base array to free from, entry 0 should be a
+ *                     Real base
+ * @Input uiBaseArraySize - The Size of the base array to free.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_FreeSingleBaseArray(RA_ARENA *pArena,
+                     RA_BASE_ARRAY_T aBaseArray,
+                     RA_BASE_ARRAY_SIZE_T uiBaseArraySize)
+{
+	BT *psBT;
+
+	PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[0]));
+
+	psBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &aBaseArray[0]);
+	if (psBT == NULL)
+	{
+		/* Did we attempt to remove a ghost page?
+		 * Essentially the base was marked real but was actually ghost.
+		 */
+		PVR_ASSERT(!"Attempt to free non-existing real base!");
+		return PVRSRV_ERROR_INVALID_REQUEST;
+	}
+
+	pArena->ui64FreeArenaSize += psBT->uSize;
+
+	PVR_ASSERT(psBT->base == aBaseArray[0]);
+	_FreeBT(pArena, psBT);
+
+	/* Set all entries to INVALID_BASE_ADDR */
+	OSCachedMemSet(aBaseArray, 0xFF, uiBaseArraySize * sizeof(RA_BASE_T));
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ * @Function _GenerateGhostBases
+ *
+ * @Description Given an array (Could be complete or partial reference)
+ *              generate Ghost bases for the allocation and size.
+ *              Entry 0 receives the Real base; each subsequent chunk
+ *              boundary receives a Ghost-marked base.
+ *              Assumes uiBaseSize is a non-zero multiple of uiChunkSize
+ *              (otherwise the unsigned countdown below would wrap).
+ *
+ * @Input uiRealBase - The Real base to generate Ghost Bases from.
+ * @Input uiBaseSize - The size of the Real Base
+ * @Input uiChunkSize - The Base chunk size used to generate Ghost
+ *                      bases on specific boundaries.
+ * @Input aBaseArray - The array to add the Ghost bases to.
+ *
+ * @Return array index of element past last Ghost base of given array.
+*/ /**************************************************************************/
+static IMG_UINT32
+_GenerateGhostBases(RA_BASE_T uiRealBase,
+                    RA_LENGTH_T uiBaseSize,
+                    RA_LENGTH_T uiChunkSize,
+                    RA_BASE_ARRAY_T aBaseArray)
+{
+	IMG_UINT32 ui32Index = 0;
+	RA_LENGTH_T uiRemaining = uiBaseSize - uiChunkSize;
+	/* This is an address, not a length: use RA_BASE_T so the Ghost flag
+	 * bit manipulation below operates on the correct type. */
+	RA_BASE_T uiCurrentBase = uiRealBase + uiChunkSize;
+	aBaseArray[ui32Index] = uiRealBase;
+
+	for (ui32Index = 1; uiRemaining != 0; ui32Index++)
+	{
+		aBaseArray[ui32Index] = RA_BASE_SET_GHOST_BIT(uiCurrentBase);
+		uiCurrentBase += uiChunkSize;
+		uiRemaining -= uiChunkSize;
+	}
+
+	return ui32Index;
+}
+
+/*************************************************************************/ /*!
+ * @Function _FindRealBaseFromGhost
+ *
+ * @Description Given an array and an index into that array for the Ghost Base
+ *              find the Real Base hosting the Ghost base in the RA.
+ * @Input aBaseArray - The array the Ghost and Real base reside on.
+ * @Input ui32GhostBaseIndex - The index into the given array for the Ghost Base.
+ * @Output pRealBase - The Real Base hosting the Ghost base.
+ * @Output pui32RealBaseIndex - The index of the Real Base found in the array.
+ *
+ * @Return None.
+*/ /**************************************************************************/
+static void
+_FindRealBaseFromGhost(RA_BASE_ARRAY_T aBaseArray,
+                       IMG_UINT32 ui32GhostBaseIndex,
+                       RA_BASE_T *pRealBase,
+                       IMG_UINT32 *pui32RealBaseIndex)
+{
+	IMG_UINT32 ui32Walk;
+
+	PVR_ASSERT(RA_BASE_IS_GHOST(aBaseArray[ui32GhostBaseIndex]));
+
+	/* Scan backwards until the first non-Ghost entry (or index 0). */
+	for (ui32Walk = ui32GhostBaseIndex;
+	     ui32Walk != 0 && RA_BASE_IS_GHOST(aBaseArray[ui32Walk]);
+	     ui32Walk--)
+	{
+	}
+
+	*pRealBase = aBaseArray[ui32Walk];
+	*pui32RealBaseIndex = ui32Walk;
+}
+
+/*************************************************************************/ /*!
+ * @Function _ConvertGhostBaseToReal
+ *
+ * @Description Convert the given Ghost Base to a Real Base in the
+ *              RA. This is mainly used in free paths so we can be
+ *              agile with memory regions.
+ * @Input pArena - The RA Arena to convert the base on.
+ * @Input aBaseArray - The Base array to convert the base on.
+ * @Input uiRealBase - The Base hosting the Ghost base to convert.
+ * @Input ui32RealBaseArrayIndex - The index in the array of the Real Base.
+ * @Input ui32GhostBaseArrayIndex - The index in the array of the Ghost Base.
+ * @Input uiChunkSize - The chunk size used to generate the Ghost bases on.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_ConvertGhostBaseToReal(RA_ARENA *pArena,
+                        RA_BASE_ARRAY_T aBaseArray,
+                        RA_BASE_T uiRealBase,
+                        IMG_UINT32 ui32RealBaseArrayIndex,
+                        IMG_UINT32 ui32GhostBaseArrayIndex,
+                        RA_LENGTH_T uiChunkSize)
+{
+	BT *pOrigRealBT;
+	BT *pNewRealBT;
+
+	pOrigRealBT = (BT *) HASH_Retrieve_Extended(pArena->pSegmentHash, &uiRealBase);
+	/* Guard against a stale/unknown Real base: _SegmentSplit would
+	 * otherwise dereference NULL. */
+	PVR_LOG_RETURN_IF_FALSE(pOrigRealBT != NULL,
+	                        "Real base not found in segment hash",
+	                        PVRSRV_ERROR_INVALID_REQUEST);
+
+	/* Split at the Ghost base's byte offset from its hosting Real base. */
+	pNewRealBT = _SegmentSplit(pOrigRealBT,
+	                           uiChunkSize *
+	                           (ui32GhostBaseArrayIndex - ui32RealBaseArrayIndex));
+	PVR_LOG_RETURN_IF_FALSE(pNewRealBT != NULL,
+	                        "Unable to split BT, no memory available to allocate new BT",
+	                        PVRSRV_ERROR_OUT_OF_MEMORY);
+
+	if (!HASH_Insert_Extended(pArena->pSegmentHash, &pNewRealBT->base, (uintptr_t) pNewRealBT))
+	{
+		PVR_LOG_RETURN_IF_ERROR(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE, "HASH_Insert_Extended");
+	}
+
+	/* The former Ghost entry now refers to its own Real segment. */
+	aBaseArray[ui32GhostBaseArrayIndex] = pNewRealBT->base;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ * @Function _FreeGhostBasesFromReal
+ *
+ * @Description Given a ghost base and size, free the contiguous ghost bases from the
+ *              real base. This has the effect of shrinking the size of the real base.
+ *              If ghost pages remain after the free region, a new Real base will be
+ *              created to host them.
+ * @Input pArena - The RA Arena to free the Ghost Bases from.
+ * @Input aBaseArray - The array to remove bases from
+ * @Input uiBaseArraySize - The size of the Base array to free from.
+ * @Input uiChunkSize - The chunk size used to generate the Ghost Bases.
+ * @Input ui32GhostBaseIndex - The index into the array of the initial Ghost base to free
+ * @Input ui32FreeCount - The number of Ghost bases to free from the Real base.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_FreeGhostBasesFromReal(RA_ARENA *pArena,
+                        RA_BASE_ARRAY_T aBaseArray,
+                        RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+                        RA_LENGTH_T uiChunkSize,
+                        IMG_UINT32 ui32GhostBaseIndex,
+                        IMG_UINT32 ui32FreeCount)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiRealBase;
+	IMG_UINT32 ui32RealBaseIndex;
+	IMG_UINT32 ui32FreeEndIndex;
+
+	_FindRealBaseFromGhost(aBaseArray,
+	                       ui32GhostBaseIndex,
+	                       &uiRealBase,
+	                       &ui32RealBaseIndex);
+
+	/* Make the first Ghost Base to free, real. */
+	eError = _ConvertGhostBaseToReal(pArena,
+	                                 aBaseArray,
+	                                 uiRealBase,
+	                                 ui32RealBaseIndex,
+	                                 ui32GhostBaseIndex,
+	                                 uiChunkSize);
+	PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+
+	/* Calculate the Base after the last to free. */
+	ui32FreeEndIndex = ui32GhostBaseIndex + ui32FreeCount;
+
+	/*
+	 * If the end of the free region is a Ghost base then we need to
+	 * make it a real base so that we can free the intended middle region.
+	 */
+	if (ui32FreeEndIndex != uiBaseArraySize &&
+	    RA_BASE_IS_GHOST(aBaseArray[ui32FreeEndIndex]))
+	{
+		eError = _ConvertGhostBaseToReal(pArena,
+		                                 aBaseArray,
+		                                 aBaseArray[ui32GhostBaseIndex],
+		                                 ui32GhostBaseIndex,
+		                                 ui32FreeEndIndex,
+		                                 uiChunkSize);
+		PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+	}
+
+	/* Free the region calculated */
+	eError = _FreeSingleBaseArray(pArena,
+	                              &aBaseArray[ui32GhostBaseIndex],
+	                              ui32FreeCount);
+	/* Log tag fixed: this reports the _FreeSingleBaseArray call above, not
+	 * _ConvertGhostBaseToReal. */
+	PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+ * @Function _ConvertGhostBaseFreeReal
+ *
+ * @Description Used in the case that we want to keep some indices that are ghost pages
+ *              but the indices to free start with the real base. In this case we can
+ *              convert the keep point to a real base, then free the original real base
+ *              along with all ghost bases prior to the new real.
+ *
+ * @Input pArena - The RA Arena to free the bases from.
+ * @Input aBaseArray - The Base array to free from.
+ * @Input uiChunkSize - The chunk size used to generate the Ghost bases.
+ * @Input uiRealBaseIndex - The index into the array of the Real base to free.
+ * @Input uiGhostBaseIndex - The index into the array of the Ghost base to convert.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_ConvertGhostBaseFreeReal(RA_ARENA *pArena,
+                          RA_BASE_ARRAY_T aBaseArray,
+                          RA_LENGTH_T uiChunkSize,
+                          IMG_UINT32 uiRealBaseIndex,
+                          IMG_UINT32 uiGhostBaseIndex)
+{
+	PVRSRV_ERROR eError;
+	RA_BASE_T uiRealBase = aBaseArray[uiRealBaseIndex];
+
+	/* Promote the keep point to a Real base before freeing what precedes it. */
+	eError = _ConvertGhostBaseToReal(pArena,
+	                                 aBaseArray,
+	                                 uiRealBase,
+	                                 uiRealBaseIndex,
+	                                 uiGhostBaseIndex,
+	                                 uiChunkSize);
+	PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+
+	eError = _FreeSingleBaseArray(pArena,
+	                              &aBaseArray[uiRealBaseIndex],
+	                              uiGhostBaseIndex - uiRealBaseIndex);
+	/* Log tag fixed: the helper called above is _FreeSingleBaseArray
+	 * ("_FreeBaseArray" does not exist). */
+	PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+ * @Function _FreeBaseArraySlice
+ *
+ * @Description Free Bases in an Array Slice.
+ *              This function assumes that the slice is within a single Real base alloc.
+ *              i.e the uiFreeStartIndex and uiFreeCount remain fully within a single real
+ *              base alloc and do not cross into another Real base region.
+ *
+ * @Input pArena - The RA Arena to free bases from.
+ * @Input aBaseArray - The Base array to free from.
+ * @Input uiBaseArraySize - The size of the Base array to free from.
+ * @Input uiChunkSize - The base chunk size used to generate the Ghost bases.
+ * @Input uiFreeStartIndex - The index in the array to start freeing from
+ * @Input uiFreeCount - The number of bases to free.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code on Failure.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_FreeBaseArraySlice(RA_ARENA *pArena,
+                    RA_BASE_ARRAY_T aBaseArray,
+                    RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+                    RA_LENGTH_T uiChunkSize,
+                    IMG_UINT32 uiFreeStartIndex,
+                    IMG_UINT32 uiFreeCount)
+{
+	/*3 cases:
+	 * Key: () = Region to Free
+	 *      [R] = Newly Real
+	 *      R = Real Base
+	 *      G = Ghost Base
+	 * 1. We free the whole Realbase (inc all Ghost bases)
+	 *    e.g. (RGGGGG)
+	 *    e.g. RGGG(R)RGG
+	 * 2 .We free the Real base but not all the Ghost bases meaning the first
+	 *    ghost base after the last freed will become a real base.
+	 *    e.g. RGGGG(RGGGG)[R]GGG
+	 *    e.g. (RGGGG)[R]GGGG
+	 * 3. We free some ghost bases from the real base
+	 *    e.g. RGGG(GGG)
+	 *    e.g. RGGG(GGG)[R]GGG
+	 *
+	 * Invalid Scenarios:
+	 * 1. RGG(GR)GGGRG
+	 * 2. RGG(GRG)GGRG
+	 * Higher levels should prevent these situations by ensuring that the free
+	 * index and count always focus on a single real base.
+	 * Scenario 1 & 2, correctly handled, would be a case 3. followed by a case 2.
+	 */
+	PVRSRV_ERROR eError;
+
+	/* The slice [uiFreeStartIndex, uiFreeStartIndex + uiFreeCount) must lie
+	 * entirely inside the array. The previous ">= uiFreeStartIndex + (uiFreeCount - 1)"
+	 * form was off-by-one and permitted a read of aBaseArray[uiBaseArraySize]
+	 * in the Case 1 test below. Written as a subtraction to avoid unsigned
+	 * overflow of the sum. */
+	PVR_LOG_RETURN_IF_FALSE(uiFreeStartIndex < uiBaseArraySize &&
+	                        uiFreeCount <= uiBaseArraySize - uiFreeStartIndex,
+	                        "Free Index given out of array bounds",
+	                        PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Find which case we have */
+
+	/* Case 1 or 2 */
+	if (RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex]))
+	{
+		/* Case 1 */
+		if (uiFreeStartIndex + uiFreeCount == uiBaseArraySize ||
+		    RA_BASE_IS_REAL(aBaseArray[uiFreeStartIndex + uiFreeCount]) ||
+		    RA_BASE_IS_INVALID(aBaseArray[uiFreeStartIndex + uiFreeCount]))
+		{
+			eError = _FreeSingleBaseArray(pArena,
+			                              &aBaseArray[uiFreeStartIndex],
+			                              uiFreeCount);
+			PVR_LOG_RETURN_IF_ERROR(eError, "_FreeSingleBaseArray");
+		}
+		/* Case 2*/
+		else
+		{
+			eError = _ConvertGhostBaseFreeReal(pArena,
+			                                   aBaseArray,
+			                                   uiChunkSize,
+			                                   uiFreeStartIndex,
+			                                   uiFreeStartIndex + uiFreeCount);
+			PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseFreeReal");
+		}
+	}
+	/* Case 3 */
+	else if (RA_BASE_IS_GHOST(aBaseArray[uiFreeStartIndex]))
+	{
+		eError = _FreeGhostBasesFromReal(pArena,
+		                                 aBaseArray,
+		                                 uiBaseArraySize,
+		                                 uiChunkSize,
+		                                 uiFreeStartIndex,
+		                                 uiFreeCount);
+		PVR_LOG_RETURN_IF_ERROR(eError, "_FreeGhostBasesFromReal");
+	}
+	/* Attempt to free an invalid base, this could be a duplicated
+	 * value in the free sparse index array */
+	else
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "Attempt to free already free base Index %u", uiFreeStartIndex));
+		/* Semicolon was missing here: with asserts enabled the statement
+		 * would not compile. */
+		PVR_ASSERT(!"Attempted double free.");
+		return PVRSRV_ERROR_RA_FREE_INVALID_CHUNK;
+	}
+
+	return PVRSRV_OK;
+}
+
/*************************************************************************/ /*!
@Function _AllocAlignSplit
@Description Given a valid BT, trim the start and end of the BT according
{
RA_BASE_T aligned_base;
- aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+ aligned_base = (uAlignment > 1) ? PVR_ALIGN(pBT->base, uAlignment) : pBT->base;
_FreeListRemove(pArena, pBT);
index_high = index_low;
}
- PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
- PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+ _FreeTableLimitBoundsCheck(&index_high);
+ _FreeTableLimitBoundsCheck(&index_low);
+
PVR_ASSERT(index_low <= index_high);
if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT))
return _AllocAlignSplit(pArena, pBT, uSize, uAlignment, base, phPriv);
}
+/*************************************************************************/ /*!
+@Function       _AttemptAllocAlignedAssured
+@Description    Attempt an allocation from an arena. If the arena allows
+                non-contiguous allocations, the allocation is guaranteed
+                given there is enough memory to satisfy the full allocation.
+@Input          pArena             The arena.
+@Input          uSize              The requested allocation size.
+@Input          uLog2MinContigSize The Log2 minimum contiguity of the bases returned.
+@Input          uFlags             Allocation flags
+@Input          uAlignment         Required uAlignment, or 0.
+                                   Must be a power of 2 if not 0
+@Input          aBaseArray         Array to allocate bases to.
+@Input          bSparseAlloc       Is the allocation we are making sparse.
+@Output         bPhysContig        Is the allocation we made physically contiguous
+                                   or did we use the scoop logic
+@Return         Success: PVRSRV_OK
+                Fail: PVRSRV_ERROR code.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_AttemptAllocAlignedAssured(RA_ARENA *pArena,
+                            RA_LENGTH_T uSize,
+                            IMG_UINT32 uLog2MinContigSize,
+                            RA_FLAGS_T uFlags,
+                            RA_LENGTH_T uAlignment,
+                            RA_BASE_ARRAY_T aBaseArray,
+                            IMG_BOOL bSparseAlloc,
+                            IMG_BOOL *bPhysContig)
+{
+	IMG_UINT32 index_low;  /* log2 Lowest contiguity required */
+	IMG_UINT32 index_high; /* log2 Size of full alloc */
+	IMG_UINT32 i;
+	struct _BT_ *pBT = NULL;
+	RA_PERISPAN_HANDLE phPriv;
+	RA_LENGTH_T uiRemaining = uSize;
+	RA_BASE_T uiBase;
+	IMG_UINT32 uiCurrentArrayIndex = 0;
+
+	PVR_ASSERT(pArena != NULL);
+
+	pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags))
+	{
+		/* no chunks with these flags. */
+		return PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS;
+	}
+
+	if (pArena->ui64FreeArenaSize < uSize)
+	{
+		/* Not enough memory to accommodate kick back for a chance to import more */
+		return PVRSRV_ERROR_RA_OUT_OF_RESOURCE;
+	}
+
+	/* Bucket search range: the contiguity requirement sets the lower bound;
+	 * the full allocation size (padded for alignment when no minimum
+	 * contiguity is requested) sets the upper bound. The former separate
+	 * "uLog2MinContigSize && uAlignment" branch was byte-identical to the
+	 * "uLog2MinContigSize" branch and has been merged. */
+	if (uLog2MinContigSize)
+	{
+		index_low = uLog2MinContigSize;
+		index_high = pvr_log2(uSize);
+	}
+	else if (uAlignment)
+	{
+		index_low = 0;
+		index_high = pvr_log2(uSize + uAlignment - 1);
+	}
+	else
+	{
+		index_low = 0;
+		index_high = pvr_log2(uSize);
+	}
+
+	PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+	PVR_ASSERT(index_low <= index_high);
+
+	/* Start at index_high + 1 as then we can check all buckets larger than the desired alloc
+	 * If we don't find one larger then we could still find one of requested size in index_high and
+	 * shortcut the non-contiguous allocation path. We check index_high + 1 first as it is
+	 * guaranteed to have a free region of the requested size if the bucket has entries. Whereas
+	 * index_high is not guaranteed to have an allocation that meets the size requested due to it
+	 * representing all free regions of size 2^bucket index to 2^bucket index +1. e.g we could have
+	 * a request for 19*4k Pages which would be represented by bucket 16, bucket 16 represents free
+	 * entries from 16*4k pages to 31*4k Pages in size, if this bucket only had free entries of
+	 * 17*4k pages the search would fail, hence not guaranteed at index_high.
+	 */
+#if defined(PVR_CTZLL)
+	i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+	for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); i++)
+	{
+	}
+#endif
+
+	PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+	if (i != FREE_TABLE_LIMIT)
+	{
+		pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+	}
+	else
+	{
+		/* In this case we have searched all buckets index_high + 1 to FREE_TABLE_LIMIT and not found an
+		 * available bucket with the required allocation size.
+		 * Because we haven't found an allocation of the requested size in index_high + 1 there is still a chance
+		 * that we can find an allocation of correct size in index_high, when index_high references the bucket
+		 * containing the largest free chunks in the RA Arena. i.e All buckets > index_high == NULL.
+		 * We do a final search in that bucket here before we attempt to scoop memory or return NULL.
+		 */
+		pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[index_high], uSize, uAlignment, 1);
+	}
+
+	/* We managed to find a contiguous allocation block of sufficient size */
+	if (pBT != NULL)
+	{
+		IMG_BOOL bResult;
+		bResult = _AllocAlignSplit(pArena, pBT, uSize, uAlignment, &uiBase, &phPriv);
+		if (bResult)
+		{
+			if (!bSparseAlloc)
+			{
+				aBaseArray[0] = uiBase;
+			}
+			else
+			{
+				_GenerateGhostBases(uiBase, uSize, 1ULL << uLog2MinContigSize, aBaseArray);
+			}
+		}
+		else
+		{
+			return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+		}
+		*bPhysContig = IMG_TRUE;
+
+		return PVRSRV_OK;
+	}
+
+	/*
+	 * If this arena doesn't have the non-contiguous allocation functionality enabled, then
+	 * don't attempt to scoop for non physically contiguous allocations. Sparse allocations
+	 * are still able to use the scoop functionality as they map in a chunk at a time in the
+	 * worst case.
+	 */
+	if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_ALLOW_NONCONTIG_MASK) == 0) &&
+	    !bSparseAlloc)
+	{
+		return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+	}
+
+	/* Attempt to Scoop memory from non-contiguous blocks.
+	 * NOTE: the loop is written "i-- > index_low" (decrement in the test)
+	 * because the previous "i >= index_low; i--" form wrapped the unsigned
+	 * counter when index_low == 0, making the condition永true and walking
+	 * off the bucket array. */
+	for (i = index_high + 1; i-- > index_low && uiRemaining != 0;)
+	{
+		/* While we have chunks of at least our contig size in the bucket to use */
+		for (
+		    pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], 1ULL << uLog2MinContigSize, uAlignment, (unsigned int) ~0);
+		    pBT != NULL && uiRemaining != 0;
+		    pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], 1ULL << uLog2MinContigSize, uAlignment, (unsigned int) ~0)) /* ~0 Try all elements in bucket */
+		{
+			/* Grab largest chunk possible that is a multiple of our min contiguity size
+			 * N.B: C always rounds towards 0 so this effectively floors for us */
+			IMG_BOOL bResult;
+			RA_BASE_T uiAlignedBase =
+			    (uAlignment > 1) ? PVR_ALIGN(pBT->base, uAlignment) : pBT->base;
+			RA_LENGTH_T uiMaxSizeAvailable = (pBT->uSize - (uiAlignedBase - pBT->base));
+			RA_LENGTH_T uiMaxMultipleOfContig = (uiMaxSizeAvailable >> uLog2MinContigSize) << uLog2MinContigSize;
+
+			/*
+			 * If the size of the BT is larger than the remaining memory to allocate
+			 * then just allocate what we need. The rest will be trimmed and put back
+			 * into the pool in _AllocAlignSplit
+			 */
+			if (uiMaxMultipleOfContig > uiRemaining)
+			{
+				uiMaxMultipleOfContig = uiRemaining;
+			}
+
+			bResult = _AllocAlignSplit(pArena, pBT, uiMaxMultipleOfContig, uAlignment, &uiBase, &phPriv);
+			if (!bResult)
+			{
+				/* Something went wrong with splitting or adding to hash,
+				 * We can try find another chunk, although this should
+				 * never occur.
+				 */
+				PVR_ASSERT(!"_AllocAlignSplit issue.");
+				continue;
+			}
+
+			uiRemaining -= uiMaxMultipleOfContig;
+
+			uiCurrentArrayIndex += _GenerateGhostBases(uiBase,
+			                                           uiMaxMultipleOfContig,
+			                                           1ULL << uLog2MinContigSize,
+			                                           &aBaseArray[uiCurrentArrayIndex]);
+		}
+	}
+
+	/* If we didn't manage to scoop enough memory then we need to unwind the allocations we just made */
+	if (uiRemaining != 0)
+	{
+		goto error_unwind;
+	}
+	*bPhysContig = IMG_FALSE;
+
+	return PVRSRV_OK;
+
+error_unwind:
+	_RA_FreeMultiUnlocked(pArena,
+	                      aBaseArray,
+	                      uiCurrentArrayIndex);
+	return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+}
+
/*************************************************************************/ /*!
@Function _AttemptImportSpanAlloc
@Description Attempt to Import more memory and create a new span.
PVRSRV_ERROR eError;
*pImportSize = uRequestSize;
- /*
- Ensure that we allocate sufficient space to meet the uAlignment
- constraint
- */
- if (uAlignment > pArena->uQuantum)
- {
- *pImportSize += (uAlignment - pArena->uQuantum);
- }
/* apply over-allocation multiplier after all alignment adjustments */
*pImportSize *= uImportMultiplier;
/* ensure that we import according to the quanta of this arena */
- *pImportSize = (*pImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+ *pImportSize = PVR_ALIGN(*pImportSize, pArena->uQuantum);
eError = pArena->pImportAlloc(pArena->pImportHandle,
*pImportSize, uImportFlags,
+ uAlignment,
pszAnnotation,
pImportBase, pImportSize,
&hPriv);
PFN_RA_ALLOC imp_alloc,
PFN_RA_FREE imp_free,
RA_PERARENA_HANDLE arena_handle,
- IMG_UINT32 ui32PolicyFlags)
+ RA_POLICY_T ui32PolicyFlags)
{
RA_ARENA *pArena;
PVRSRV_ERROR eError;
RA_LOG2QUANTUM_T uLog2Quantum,
IMG_UINT64 ui64CpuBase,
IMG_UINT64 ui64SpanDevBase,
- IMG_UINT64 ui64SpanSize)
+ IMG_UINT64 ui64SpanSize,
+ RA_POLICY_T ui32PolicyFlags)
{
RA_ARENA *psRA;
IMG_BOOL bSuccess;
NULL, /* No Import */
NULL, /* No free import */
NULL, /* No import handle */
- RA_POLICY_DEFAULT); /* No restriction on import splitting */
+ ui32PolicyFlags); /* No restriction on import splitting */
PVR_LOG_GOTO_IF_FALSE(psRA != NULL, "RA_Create() failed", return_);
bSuccess = RA_Add(psRA, (RA_BASE_T) ui64SpanDevBase, (RA_LENGTH_T) ui64SpanSize, 0, NULL);
"base=0x%llx, size=0x%llx", __func__, pArena->name,
(unsigned long long)base, (unsigned long long)uSize));
- uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+ uSize = PVR_ALIGN(uSize, pArena->uQuantum);
bt = _InsertResource(pArena, base, uSize, uFlags);
if (bt != NULL)
{
eError = _AttemptImportSpanAlloc(pArena,
uSize,
uImportMultiplier,
- uFlags,
+ uImportFlags,
uAlignment,
pszAnnotation,
&uImportBase,
return PVRSRV_OK;
}
+/*************************************************************************/ /*!
+@Function       _RA_AllocMultiUnlocked
+@Description    Allocate resource from an arena into a base array, importing
+                more resource via the arena's import callback if the first
+                attempt fails. Caller must hold the arena lock.
+@Input          pArena            The arena.
+@Input          uRequestSize      Requested size; must be a multiple of the
+                                  chunk size (1ULL << uiLog2ChunkSize).
+@Input          uiLog2ChunkSize   Log2 of the chunk granularity; also used
+                                  as the allocation alignment.
+@Input          uImportMultiplier Over-allocation multiplier for imports.
+@Input          uImportFlags      Allocation/import flags.
+@Input          pszAnnotation     Annotation string passed to the importer.
+@Input          aBaseArray        Array the allocated bases are written to.
+@Input          uiBaseArraySize   Number of entries in aBaseArray.
+@Input          bSparseAlloc      Whether this is a sparse allocation.
+@Output         bPhysContig       Whether the result is physically contiguous.
+@Return         PVRSRV_OK on success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RA_AllocMultiUnlocked(RA_ARENA *pArena,
+                       RA_LENGTH_T uRequestSize,
+                       IMG_UINT32 uiLog2ChunkSize,
+                       IMG_UINT8 uImportMultiplier,
+                       RA_FLAGS_T uImportFlags,
+                       const IMG_CHAR *pszAnnotation,
+                       RA_BASE_ARRAY_T aBaseArray,
+                       RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+                       IMG_BOOL bSparseAlloc,
+                       IMG_BOOL *bPhysContig)
+{
+	PVRSRV_ERROR eError;
+	RA_LENGTH_T uSize = uRequestSize;
+	RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+	PVR_LOG_RETURN_IF_FALSE(pArena != NULL && uImportMultiplier != 0 && uSize != 0,
+	                        "One of the necessary parameters is 0",
+	                        PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* 1ULL: uiLog2ChunkSize may reach 64 per the constraint below, so a
+	 * 32-bit "1 <<" would be undefined behaviour. (Missing ';' also fixed.) */
+	PVR_ASSERT((uRequestSize & ((1ULL << uiLog2ChunkSize) - 1)) == 0);
+	PVR_LOG_RETURN_IF_FALSE((uRequestSize & ((1ULL << uiLog2ChunkSize) - 1)) == 0,
+	                        "Require uiLog2ChunkSize pow 2 & multiple of uRequestSize",
+	                        PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Enforce these constraints so we can use those bits to handle Ghost bases. */
+	PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 &&
+	                        uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX,
+	                        "Log2 chunk size must be 12-64",
+	                        PVRSRV_ERROR_INVALID_PARAMS);
+
+	/* Ensure Base Array is large enough for intended allocation */
+	PVR_LOG_RETURN_IF_FALSE(uiBaseArraySize * (1ULL << uiLog2ChunkSize) >= uRequestSize,
+	                        "Not enough array space to store alloc bases",
+	                        PVRSRV_ERROR_INVALID_PARAMS);
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	/* The alignment used below is derived as 1ULL << uiLog2ChunkSize and is a
+	 * power of two by construction; the previous assert referenced an
+	 * undeclared 'uAlignment' and could not compile with asserts enabled. */
+
+	PVR_DPF((PVR_DBG_MESSAGE,
+	        "%s: arena='%s', size=0x%llx(0x%llx), "
+	        "log2ChunkSize=0x%llx", __func__, pArena->name,
+	        (unsigned long long)uSize, (unsigned long long)uRequestSize,
+	        (unsigned long long)uiLog2ChunkSize));
+
+	/* if allocation failed then we might have an import source which
+	   can provide more resource, else we will have to fail the
+	   allocation to the caller. */
+	eError = _AttemptAllocAlignedAssured(pArena,
+	                                     uSize,
+	                                     uiLog2ChunkSize,
+	                                     uFlags,
+	                                     1ULL << uiLog2ChunkSize,
+	                                     aBaseArray,
+	                                     bSparseAlloc,
+	                                     bPhysContig);
+	if (eError)
+	{
+		RA_BASE_T uImportBase;
+		RA_LENGTH_T uImportSize;
+		BT *pBT;
+
+		if (eError == PVRSRV_ERROR_RA_OUT_OF_RESOURCE)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,"RA out of resource, attempt to import more if possible:"
+			        " uSize:0x%llx"
+			        " uFlags:0x%llx",
+			        (unsigned long long) uSize,
+			        (unsigned long long) uFlags));
+		}
+		else if (eError == PVRSRV_ERROR_RA_NO_RESOURCE_WITH_FLAGS)
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,"RA no resource for flags, attempt to import some if possible:"
+			        " uSize:0x%llx"
+			        " uFlags:0x%llx",
+			        (unsigned long long) uSize,
+			        (unsigned long long) uFlags));
+		}
+		else
+		{
+			PVR_DPF((PVR_DBG_MESSAGE,"RA Failed to Allocate, could be fragmented, attempt to import"
+			        " more resource if possible."));
+		}
+
+		eError = _AttemptImportSpanAlloc(pArena,
+		                                 uSize,
+		                                 uImportMultiplier,
+		                                 uFlags,
+		                                 1ULL << uiLog2ChunkSize,
+		                                 pszAnnotation,
+		                                 &uImportBase,
+		                                 &uImportSize,
+		                                 &pBT);
+		if (eError != PVRSRV_OK)
+		{
+			return eError;
+		}
+
+		pArena->ui64FreeArenaSize += uImportSize;
+		pArena->ui64TotalArenaSize += uImportSize;
+
+		/* Literal normalised from "1Ull" for consistency. */
+		eError = _AttemptAllocAlignedAssured(pArena,
+		                                     uSize,
+		                                     uiLog2ChunkSize,
+		                                     uFlags,
+		                                     1ULL << uiLog2ChunkSize,
+		                                     aBaseArray,
+		                                     bSparseAlloc,
+		                                     bPhysContig);
+		if (eError)
+		{
+			PVR_DPF((PVR_DBG_ERROR,
+			        "%s: name='%s' second alloc failed!",
+			        __func__, pArena->name));
+			/*
+			  On failure of _AttemptAllocAligned() depending on the exact point
+			  of failure, the imported segment may have been used and freed, or
+			  left untouched. If the later, we need to return it.
+			*/
+			_FreeBT(pArena, pBT);
+
+			return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+		}
+#if defined(DEBUG)
+		/*
+		 * This block of code checks to see if the extra memory we just imported was
+		 * used for the second allocation. If we imported memory but did not use it,
+		 * it indicates there is a bug in the allocation logic. We can still recover by
+		 * freeing the imported span but we emit an error to signal that there is an
+		 * issue.
+		 * */
+		else
+		{
+			IMG_UINT32 i;
+			IMG_BOOL bBasesInNewSpan = IMG_FALSE;
+
+			for (i = 0; i < uiBaseArraySize; i++)
+			{
+				RA_BASE_T uiBase = RA_BASE_STRIP_GHOST_BIT(aBaseArray[i]);
+
+				/* If the base hasn't been allocated then skip it */
+				if (aBaseArray[i] == INVALID_BASE_ADDR)
+				{
+					continue;
+				}
+
+				if (uiBase >= uImportBase &&
+				    uiBase <= uImportBase + uImportSize)
+				{
+					bBasesInNewSpan = IMG_TRUE;
+				}
+			}
+
+			if (!bBasesInNewSpan)
+			{
+				PVR_DPF((PVR_DBG_ERROR,
+				        "%s: name='%s' alloc did not occur in the imported span!",
+				        __func__, pArena->name));
+				/*
+				  Remove the imported span which should not be in use (if it is then
+				  that is okay, but essentially no span should exist that is not used).
+				*/
+				_FreeBT(pArena, pBT);
+
+				pArena->ui64FreeArenaSize -= uImportSize;
+				pArena->ui64TotalArenaSize -= uImportSize;
+			}
+		}
+#endif
+	}
+
+	PVR_ASSERT(is_arena_valid(pArena));
+
+	pArena->ui64FreeArenaSize -= uSize;
+
+	return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+ * @Function RA_AllocMulti
+ *
+ * @Description Locked entry point for a non-sparse multi-base allocation:
+ *              acquires the arena lock and forwards to
+ *              _RA_AllocMultiUnlocked with bSparseAlloc = IMG_FALSE.
+ *
+ * @Input pArena - The RA Arena to allocate from.
+ * @Input uRequestSize - Requested size (multiple of the chunk size).
+ * @Input uiLog2ChunkSize - Log2 of the chunk granularity.
+ * @Input uImportMultiplier - Over-allocation multiplier for imports.
+ * @Input uImportFlags - Allocation/import flags.
+ * @Input pszAnnotation - Annotation passed to the import callback.
+ * @Input aBaseArray - Array the allocated bases are written to.
+ * @Input uiBaseArraySize - Number of entries in aBaseArray.
+ * @Output bPhysContig - Whether the allocation is physically contiguous.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_AllocMulti(RA_ARENA *pArena,
+              RA_LENGTH_T uRequestSize,
+              IMG_UINT32 uiLog2ChunkSize,
+              IMG_UINT8 uImportMultiplier,
+              RA_FLAGS_T uImportFlags,
+              const IMG_CHAR *pszAnnotation,
+              RA_BASE_ARRAY_T aBaseArray,
+              RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+              IMG_BOOL *bPhysContig)
+{
+	PVRSRV_ERROR eError;
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+	eError = _RA_AllocMultiUnlocked(pArena,
+	                                uRequestSize,
+	                                uiLog2ChunkSize,
+	                                uImportMultiplier,
+	                                uImportFlags,
+	                                pszAnnotation,
+	                                aBaseArray,
+	                                uiBaseArraySize,
+	                                IMG_FALSE, /* Sparse alloc */
+	                                bPhysContig);
+	OSLockRelease(pArena->hLock);
+
+	return eError;
+}
+
+/*************************************************************************/ /*!
+ * @Function RA_AllocMultiSparse
+ *
+ * @Description Locked entry point for sparse multi-base allocations.
+ *              When puiAllocIndices is NULL the full array is populated;
+ *              otherwise only the given indices are allocated, with runs of
+ *              consecutive indices consolidated into single block
+ *              allocations to reduce fragmentation.
+ *
+ * @Input pArena - The RA Arena to allocate from.
+ * @Input uiLog2ChunkSize - Log2 of the chunk granularity.
+ * @Input uImportMultiplier - Over-allocation multiplier for imports.
+ * @Input uImportFlags - Allocation/import flags.
+ * @Input pszAnnotation - Annotation passed to the import callback.
+ * @Input aBaseArray - Array the allocated bases are written to.
+ * @Input uiBaseArraySize - Number of entries in aBaseArray.
+ * @Input puiAllocIndices - Indices to allocate, or NULL for all.
+ * @Input uiAllocCount - Number of chunks to allocate.
+ *
+ * @Return PVRSRV_OK on Success, PVRSRV_ERROR code otherwise.
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_AllocMultiSparse(RA_ARENA *pArena,
+                    IMG_UINT32 uiLog2ChunkSize,
+                    IMG_UINT8 uImportMultiplier,
+                    RA_FLAGS_T uImportFlags,
+                    const IMG_CHAR *pszAnnotation,
+                    RA_BASE_ARRAY_T aBaseArray,
+                    RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+                    IMG_UINT32 *puiAllocIndices,
+                    IMG_UINT32 uiAllocCount)
+{
+	IMG_UINT32 i;
+	PVRSRV_ERROR eError;
+	IMG_BOOL bPhysContig;
+
+	OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+
+	/*
+	 * In this case the arguments given show the allocation is
+	 * sparse but has no specific indices, this indicates
+	 * we want to populate the full aBaseArray
+	 */
+	if (puiAllocIndices == NULL)
+	{
+		RA_LENGTH_T uRequestSize = (RA_LENGTH_T) uiAllocCount << uiLog2ChunkSize;
+		eError = _RA_AllocMultiUnlocked(pArena,
+		                                uRequestSize,
+		                                uiLog2ChunkSize,
+		                                uImportMultiplier,
+		                                uImportFlags,
+		                                pszAnnotation,
+		                                aBaseArray,
+		                                uiBaseArraySize,
+		                                IMG_TRUE, /* Sparse alloc */
+		                                &bPhysContig);
+		PVR_LOG_IF_ERROR(eError, "RA_AllocMulti");
+		OSLockRelease(pArena->hLock);
+		return eError;
+	}
+
+	/*
+	 * This case is optimised for single allocations as we can skip
+	 * some of the iteration logic in the full allocation path.
+	 */
+	if (uiAllocCount == 1)
+	{
+		eError = _RA_AllocMultiUnlocked(pArena,
+		                                1ULL << uiLog2ChunkSize,
+		                                uiLog2ChunkSize,
+		                                uImportMultiplier,
+		                                uImportFlags,
+		                                pszAnnotation,
+		                                &aBaseArray[puiAllocIndices[0]],
+		                                1,
+		                                IMG_TRUE, /* Sparse alloc */
+		                                &bPhysContig);
+		PVR_LOG_IF_ERROR(eError, "RA_AllocMulti");
+		OSLockRelease(pArena->hLock);
+		return eError;
+	}
+
+	/*
+	 * By consolidating / grouping the indices given we can perform sparse allocations
+	 * in blocks, this has the effect of reducing fragmentation and creating optimal free
+	 * scenarios. Free can be performed in blocks rather than a chunk at a time, this reduces
+	 * the amount of BT merging cycles we perform.
+	 */
+	for (i = 0; i < uiAllocCount;)
+	{
+		IMG_UINT32 j;
+		IMG_UINT32 uiConsolidate = 1;
+
+		/* Count how many of the following indices are consecutive with
+		 * puiAllocIndices[i]; they are allocated as one block below. */
+		for (j = i;
+		     j + 1 != uiAllocCount &&
+		     puiAllocIndices[j + 1] == puiAllocIndices[j] + 1;
+		     j++)
+		{
+			uiConsolidate++;
+		}
+
+		eError = _RA_AllocMultiUnlocked(pArena,
+		                                (IMG_UINT64) uiConsolidate << uiLog2ChunkSize,
+		                                uiLog2ChunkSize,
+		                                uImportMultiplier,
+		                                uImportFlags,
+		                                pszAnnotation,
+		                                &aBaseArray[puiAllocIndices[i]],
+		                                uiConsolidate,
+		                                IMG_TRUE, /* Sparse alloc */
+		                                &bPhysContig);
+		PVR_LOG_GOTO_IF_ERROR(eError, "RA_AllocMulti", unwind_alloc);
+		i += uiConsolidate;
+	}
+
+	OSLockRelease(pArena->hLock);
+	return PVRSRV_OK;
+
+unwind_alloc:
+	/* i holds the number of chunks successfully allocated so far; free
+	 * exactly those (the helper updates the count through the pointer). */
+	if (i != 0)
+	{
+		PVRSRV_ERROR eFreeError;
+		eFreeError = _RA_FreeMultiUnlockedSparse(pArena,
+		                                         aBaseArray,
+		                                         uiBaseArraySize,
+		                                         1ULL << uiLog2ChunkSize,
+		                                         puiAllocIndices,
+		                                         &i);
+		PVR_LOG_IF_ERROR(eFreeError, "_RA_FreeMultiUnlockedSparse");
+	}
+
+	OSLockRelease(pArena->hLock);
+	return eError;
+}
+
/*************************************************************************/ /*!
@Function RA_Find_BT_VARange
@Description To find the boundary tag associated with the given device
RA_FLAGS_T uImportFlags)
{
IMG_PSPLAY_TREE psSplaynode;
- BT *pBT = pArena->pHeadSegment;
IMG_UINT32 uIndex;
- uIndex = pvr_log2 (uRequestSize);
-
/* Find the splay node associated with these import flags */
psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets);
return NULL;
}
+ uIndex = pvr_log2(uRequestSize);
+
/* Find the free Boundary Tag from the bucket that holds the requested range */
while (uIndex < FREE_TABLE_LIMIT)
{
- pBT = psSplaynode->buckets[uIndex];
+ BT *pBT = psSplaynode->buckets[uIndex];
while (pBT)
{
PVR_ASSERT(is_arena_valid(pArena));
/* Align the requested size to the Arena Quantum */
- uSize = ((uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1));
+ uSize = PVR_ALIGN(uSize, pArena->uQuantum);
/* Must be a power of 2 or 0 */
PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
if (uAlignment > 1)
{
- if (base != ((base + uAlignment - 1) & ~(uAlignment - 1)))
+ if (base != PVR_ALIGN(base, uAlignment))
{
PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_);
}
OSLockRelease(pArena->hLock);
}
+static PVRSRV_ERROR
+_RA_FreeMultiUnlocked(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize)
+{
+ PVRSRV_ERROR eError;
+
+ /* Free the whole array */
+ if (uiBaseArraySize == 1)
+ {
+ eError = _FreeSingleBaseArray(pArena, aBaseArray, uiBaseArraySize);
+ PVR_LOG_IF_ERROR(eError, "_FreeSingleBaseArray");
+ }
+ else
+ {
+ eError = _FreeMultiBaseArray(pArena, aBaseArray, uiBaseArraySize);
+ PVR_LOG_IF_ERROR(eError, "_FreeMultiBaseArray");
+ }
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_FreeMulti(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize)
+{
+ PVRSRV_ERROR eError;
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ eError = _RA_FreeMultiUnlocked(pArena,
+ aBaseArray,
+ uiBaseArraySize);
+ OSLockRelease(pArena->hLock);
+
+ return eError;
+}
+
+static PVRSRV_ERROR
+_RA_FreeMultiUnlockedSparse(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ RA_LENGTH_T uiChunkSize,
+ IMG_UINT32 *puiFreeIndices,
+ IMG_UINT32 *puiFreeCount)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiFreeCount = *puiFreeCount;
+ *puiFreeCount = 0;
+
+ /* Handle case where we only have 1 base to free. */
+ if (uiFreeCount == 1)
+ {
+ eError = _FreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiFreeIndices[0],
+ 1);
+ PVR_LOG_IF_ERROR(eError, "_FreeBaseArraySlice");
+ if (eError == PVRSRV_OK)
+ {
+ *puiFreeCount = uiFreeCount;
+ }
+ return eError;
+ }
+
+ for (i = 0; i < uiFreeCount;)
+ {
+ IMG_UINT32 j;
+ IMG_UINT32 uiConsolidate = 1;
+
+ PVR_ASSERT(RA_BASE_IS_REAL(aBaseArray[i]));
+
+ for (j = i;
+ puiFreeIndices[j + 1] == puiFreeIndices[j] + 1 &&
+ RA_BASE_IS_GHOST(aBaseArray[puiFreeIndices[j + 1]]);
+ j++)
+ {
+ uiConsolidate++;
+ }
+
+ eError = _FreeBaseArraySlice(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiChunkSize,
+ puiFreeIndices[i],
+ uiConsolidate);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_FreeBaseArraySlice");
+
+ i += uiConsolidate;
+ *puiFreeCount += uiConsolidate;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_FreeMultiSparse(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiFreeIndices,
+ IMG_UINT32 *puiFreeCount)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_LOG_RETURN_IF_FALSE(puiFreeCount != NULL,
+ "puiFreeCount Required",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Ensure Base Array is large enough for intended free */
+ PVR_LOG_RETURN_IF_FALSE(uiBaseArraySize >= *puiFreeCount,
+ "Attempt to free more bases than array holds",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ PVR_LOG_RETURN_IF_FALSE(puiFreeIndices != NULL,
+ "puiFreeIndices Required",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ PVR_LOG_RETURN_IF_FALSE(uiLog2ChunkSize >= RA_BASE_FLAGS_LOG2 &&
+ uiLog2ChunkSize <= RA_BASE_CHUNK_LOG2_MAX,
+ "Log2 chunk size must be 12-64",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ eError = _RA_FreeMultiUnlockedSparse(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ 1ULL << uiLog2ChunkSize,
+ puiFreeIndices,
+ puiFreeCount);
+ OSLockRelease(pArena->hLock);
+
+ return eError;
+}
+
+static PVRSRV_ERROR
+_TrimBlockMakeReal(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 uiStartIndex,
+ IMG_UINT32 uiEndIndex)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RA_BASE_T sRealBase;
+ IMG_UINT32 uiRealBaseIndex;
+
+ /* Note: Error return paths in this function do not require unwinding.
+ * Free logic is performed based upon indices and detection of Real base regions,
+ * performance wise it would be more costly to unwind the conversion here than to
+ * just free a smaller Real base region.
+ */
+
+ /* Check Start index is real, if not make it real */
+ if (RA_BASE_IS_GHOST(aBaseArray[uiStartIndex]))
+ {
+ _FindRealBaseFromGhost(aBaseArray,
+ uiStartIndex,
+ &sRealBase,
+ &uiRealBaseIndex);
+
+ eError = _ConvertGhostBaseToReal(pArena,
+ aBaseArray,
+ sRealBase,
+ uiRealBaseIndex,
+ uiStartIndex,
+ 1ULL << uiLog2ChunkSize);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+ }
+
+ /* Check end +1 is real or end of array , if ghost make real */
+ if (uiEndIndex + 1 != uiBaseArraySize &&
+ RA_BASE_IS_GHOST(aBaseArray[uiEndIndex + 1]))
+ {
+ _FindRealBaseFromGhost(aBaseArray,
+ uiEndIndex + 1,
+ &sRealBase,
+ &uiRealBaseIndex);
+
+ eError = _ConvertGhostBaseToReal(pArena,
+ aBaseArray,
+ sRealBase,
+ uiRealBaseIndex,
+ uiEndIndex + 1,
+ 1ULL << uiLog2ChunkSize);
+ PVR_LOG_RETURN_IF_ERROR(eError, "_ConvertGhostBaseToReal");
+ }
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+RA_SwapSparseMem(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiXIndices,
+ IMG_UINT32 *puiYIndices,
+ IMG_UINT32 uiSwapCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiSwapped = 0;
+ IMG_UINT32 uiStartIndex;
+ /* Consolidation values counting the bases after the start index*/
+ IMG_UINT32 uiXConsol;
+ IMG_UINT32 uiYConsol;
+ /* Consolidation limit, the smallest consecutive indices between the
+ * two inputs
+ */
+ IMG_UINT32 uiConsolidateLimit;
+ IMG_UINT32 uiTotalSwapCount;
+ IMG_UINT32 i;
+
+ /*
+ * The algorithm below aims to swap the desired indices whilst also
+ * maintaining a maximum contiguity of allocation blocks where possible.
+ * It does this by:
+ * Consolidating the contiguous indices of X and Y.
+ * Selecting the smallest of these consolidations as a range to swap in a block.
+ * Trim both block ranges using the indices range to ensure that Real bases are
+ * created to represent regions that have been split due to the indices.
+ * Perform the swap and update the swapped count ready for the next iteration.
+ * Note: Maintaining contiguity improves performance of free logic for sparse
+ * allocations because we can free in regions rather than chunks.
+ */
+ while (uiSwapped != uiSwapCount)
+ {
+ IMG_UINT32 x, y;
+ uiTotalSwapCount = 1;
+ uiStartIndex = uiSwapped;
+ uiXConsol = 0;
+ uiYConsol = 0;
+
+ /* Calculate contiguous indices at X */
+ for (x = uiStartIndex;
+ x < uiSwapCount &&
+ puiXIndices[x] + 1 == puiXIndices[x + 1];
+ x++)
+ {
+ uiXConsol++;
+ }
+
+ /* Calculate contiguous indices at Y */
+ for (y = uiStartIndex;
+ y < uiSwapCount &&
+ puiYIndices[y] + 1 == puiYIndices[y + 1];
+ y++)
+ {
+ uiYConsol++;
+ }
+
+ /* Find lowest consolidation value */
+ uiConsolidateLimit = (uiXConsol < uiYConsol) ? uiXConsol : uiYConsol;
+
+ /* Perform RealBase translation where required */
+ eError = _TrimBlockMakeReal(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiLog2ChunkSize,
+ puiXIndices[uiStartIndex],
+ puiXIndices[uiStartIndex + uiConsolidateLimit]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_TrimBlockMakeReal", unwind);
+
+ eError = _TrimBlockMakeReal(pArena,
+ aBaseArray,
+ uiBaseArraySize,
+ uiLog2ChunkSize,
+ puiYIndices[uiStartIndex],
+ puiYIndices[uiStartIndex + uiConsolidateLimit]);
+ PVR_LOG_GOTO_IF_ERROR(eError, "_TrimBlockMakeReal", unwind);
+
+ uiTotalSwapCount += uiConsolidateLimit;
+ uiSwapped += uiTotalSwapCount;
+ i = uiStartIndex;
+
+ do
+ {
+ SWAP(aBaseArray[puiXIndices[i]], aBaseArray[puiYIndices[i]]);
+ uiTotalSwapCount--;
+ i++;
+ }
+ while (uiTotalSwapCount != 0);
+ }
+
+ return PVRSRV_OK;
+
+unwind:
+ /* If we hit an error when Trimming we should revert the swapping
+ * that has already been performed.
+ */
+ for (i = 0; i < uiSwapped; i++)
+ {
+ SWAP(aBaseArray[puiXIndices[i]], aBaseArray[puiYIndices[i]]);
+ }
+
+ return eError;
+}
+
IMG_INTERNAL void
RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats)
{
	/* Snapshot the arena's free-space counter into the caller's stats
	 * struct. NOTE(review): only this one field assignment is visible
	 * here; confirm against the full function whether further stats
	 * fields are also populated.
	 */
	psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize;
}
+IMG_INTERNAL IMG_CHAR *
+RA_GetArenaName(RA_ARENA *pArena)
+{
+ return pArena->name;
+}
+
/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */
#define _DBG(...)
/* combine contiguous segments */
while ((pNext = pNext->pNextSegment) != NULL &&
- pNext->type == btt_live &&
- pNext->base == pData->uiAddr + pData->uiSize)
+ pNext->type == pNext->pPrevSegment->type &&
+ pNext->type == btt_live &&
+ pNext->base == pData->uiAddr + pData->uiSize)
{
_DBG("(%s()) combining segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", "
"type=%u", __func__, (void *) pNext->base, pNext->uSize,
IMG_UINT32 uiRecognisedQuantum = 0;
- IMG_UINT32 uiLastBase = 0;
- IMG_UINT32 uiLastSize = 0;
+ IMG_UINT64 uiLastBase = 0;
+ IMG_UINT64 uiLastSize = 0;
IMG_UINT32 i;
+ IMG_UINT32 uiRemainder;
PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT64 uiLargestFreeSegmentSize = 0;
+ IMG_UINT32 uiFragPercentage = 0;
+
/* -- papRegionArray Structure --
* papRegionArray Indexes
* | Chunk 0 Chunk 1 Chunk 2 Chunk 3
return PVRSRV_ERROR_INVALID_PARAMS;
}
- pIter = RA_IteratorAcquire(pArena, IMG_FALSE);
+ pIter = RA_IteratorAcquire(pArena, IMG_TRUE);
PVR_LOG_RETURN_IF_NOMEM(pIter, "RA_IteratorAcquire");
uiRecognisedQuantum = pArena->uQuantum > 0 ? pArena->uQuantum : 4096;
while (RA_IteratorNext(pIter, &sIterData))
{
- if (sIterData.uiAddr >= uiLastBase)
+ if (!sIterData.bFree && sIterData.uiAddr >= uiLastBase)
{
uiLastBase = sIterData.uiAddr;
uiLastSize = sIterData.uiSize;
}
}
- uiRegionCount = ((uiLastBase + uiLastSize) / uiRecognisedQuantum) / uiRegionSize;
- if (((uiLastBase + uiLastSize) / uiRecognisedQuantum) % uiRegionSize != 0
- || uiRegionCount == 0)
+ uiRegionCount = OSDivide64(uiLastBase + uiLastSize, uiRecognisedQuantum,
+ &uiRemainder);
+ uiRegionCount = OSDivide64(uiRegionCount, uiRegionSize, &uiRemainder);
+ if (uiRemainder != 0 || uiRegionCount == 0)
{
uiRegionCount += 1;
}
while (RA_IteratorNext(pIter, &sIterData))
{
+ IMG_UINT64 uiDataDivRecQuant;
+
IMG_UINT32 uiAddrRegionIdx = 0;
IMG_UINT32 uiAddrRegionOffset = 0;
IMG_UINT32 uiAddrChunkIdx = 0;
IMG_UINT32 uiRegionIdx = 0;
IMG_UINT32 uiChunkIdx = 0;
-#if defined(__KERNEL__) && defined(__linux__)
- IMG_UINT64 uiDataDivRecQuant = sIterData.uiSize;
- uiQuantisedSizeMod = do_div(uiDataDivRecQuant, uiRecognisedQuantum);
- uiQuantisedSize = (IMG_UINT32)uiDataDivRecQuant;
-
- uiDataDivRecQuant = sIterData.uiAddr;
- do_div(uiDataDivRecQuant, uiRecognisedQuantum);
- uiAddrRegionOffset = do_div(uiDataDivRecQuant, uiRegionSize);
- uiAddrRegionIdx = (IMG_UINT32)uiDataDivRecQuant;
+ /* If the current data is for a free block, use it to track largest
+ * contiguous free segment size.
+ */
+ if (sIterData.bFree && sIterData.uiSize > uiLargestFreeSegmentSize)
+ {
+ uiLargestFreeSegmentSize = sIterData.uiSize;
+ continue;
+ }
- uiDataDivRecQuant = sIterData.uiAddr;
- do_div(uiDataDivRecQuant, uiRecognisedQuantum);
-#else
- IMG_UINT64 uiDataDivRecQuant = sIterData.uiAddr / uiRecognisedQuantum;
- uiAddrRegionIdx = uiDataDivRecQuant / uiRegionSize;
- uiAddrRegionOffset = uiDataDivRecQuant % uiRegionSize;
+ uiDataDivRecQuant = OSDivide64(sIterData.uiAddr, uiRecognisedQuantum,
+ &uiRemainder);
+ uiAddrRegionIdx = OSDivide64(uiDataDivRecQuant, uiRegionSize,
+ &uiAddrRegionOffset);
+ uiQuantisedSize = OSDivide64(sIterData.uiSize, uiRecognisedQuantum,
+ &uiQuantisedSizeMod);
- uiQuantisedSize = sIterData.uiSize / uiRecognisedQuantum;
- uiQuantisedSizeMod = sIterData.uiSize % uiRecognisedQuantum;
-#endif
uiAddrChunkIdx = uiAddrRegionOffset / uiChunkSize;
uiAddrChunkOffset = uiAddrRegionOffset % uiChunkSize;
uiAddrChunkShift = uiChunkSize - uiAddrChunkOffset;
uiQuantisedSize += 1;
}
-#if defined(__KERNEL__) && defined(__linux__)
- uiDataDivRecQuant += uiQuantisedSize - 1;
- do_div(uiDataDivRecQuant, uiRegionSize);
- uiAllocLastRegionIdx = (IMG_UINT32)uiDataDivRecQuant;
-#else
- uiAllocLastRegionIdx =
- (uiDataDivRecQuant + uiQuantisedSize - 1) / uiRegionSize;
-#endif
+ uiAllocLastRegionIdx = OSDivide64(uiDataDivRecQuant + uiQuantisedSize - 1,
+ uiRegionSize, &uiRemainder);
uiAllocChunkSize = (uiAddrChunkOffset + uiQuantisedSize) / uiChunkSize;
if ((uiAddrChunkOffset + uiQuantisedSize) % uiChunkSize > 0)
}
}
}
-
- RA_IteratorRelease(pIter);
+ if (pArena->ui64FreeArenaSize && uiLargestFreeSegmentSize)
+ {
+ /* N.B This can look strange in a dual RA when comparing to the dump visualisation
+ * as spans that are freed are not included in the segment list, regardless it is
+ * an accurate representation for the spans in the Arena.
+ */
+ uiFragPercentage = OSDivide64(100 * pArena->ui64FreeArenaSize,
+ pArena->ui64FreeArenaSize + uiLargestFreeSegmentSize,
+ &uiRemainder);
+ }
pfnLogDump(pPrivData, "~~~ '%s' Resource Arena Block Dump", pArena->name);
pfnLogDump(pPrivData, " Block Size: %uB", uiRecognisedQuantum);
pfnLogDump(pPrivData,
" Span Memory Usage: %"IMG_UINT64_FMTSPEC"B"
- " Free Span Memory: %"IMG_UINT64_FMTSPEC"B",
+ " Free Span Memory: %"IMG_UINT64_FMTSPEC"B"
+ " Largest Free Region Size: %"IMG_UINT64_FMTSPEC"B"
+ " Percent Fragmented %u%%",
pArena->ui64TotalArenaSize,
- pArena->ui64FreeArenaSize);
+ pArena->ui64FreeArenaSize,
+ uiLargestFreeSegmentSize,
+ uiFragPercentage);
pfnLogDump(pPrivData,
"===============================================================================");
}
}
}
+
+ RA_IteratorRelease(pIter);
+
OSFreeMem(papRegionArray);
return eError;
SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
RA_LENGTH_T uSize,
RA_FLAGS_T uFlags,
+ RA_LENGTH_T uBaseAlignment,
const IMG_CHAR *pszAnnotation,
RA_BASE_T *puiBase,
RA_LENGTH_T *puiActualSize,
RA_LENGTH_T uiSpanSize;
PVRSRV_ERROR eError;
PVR_UNREFERENCED_PARAMETER(uFlags);
+ PVR_UNREFERENCED_PARAMETER(uBaseAlignment);
/* Check we've not been called with an unexpected size */
PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0);
static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
{
- IMG_UINT64 ui64Temp;
+ IMG_UINT64 ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase;
- PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
-
- ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+
return TRUNCATE_64BITS_TO_32BITS(ui64Temp);
}
static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
{
- SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->psSyncBlock;
psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
(SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
SYNC_PRIM_BLOCK *psSyncBlock;
SYNC_PRIM_CONTEXT *psContext;
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
psContext = psSyncBlock->psContext;
#if !defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
PVR_UNREFERENCED_PARAMETER(bFreeFirstSyncPrim);
#else
/* Defer freeing the first allocated sync prim in the sync context */
- if (psSyncInt != psContext->hFirstSyncPrim || (psSyncInt == psContext->hFirstSyncPrim && bFreeFirstSyncPrim))
+ if (psSyncInt != psContext->hFirstSyncPrim || bFreeFirstSyncPrim)
#endif
{
PVRSRV_ERROR eError;
SHARED_DEV_CONNECTION hDevConnection =
- psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+ psSyncInt->psSyncBlock->psContext->hDevConnection;
if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED)
{
- if (psSyncInt->u.sLocal.hRecord)
+ if (psSyncInt->hRecord)
{
/* remove this sync record */
eError = DestroyServerResource(hDevConnection,
NULL,
BridgeSyncRecordRemoveByHandle,
- psSyncInt->u.sLocal.hRecord);
+ psSyncInt->hRecord);
PVR_LOG_IF_ERROR(eError, "BridgeSyncRecordRemoveByHandle");
}
}
(void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
#endif
- RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+ RA_Free(psContext->psSubAllocRA, psSyncInt->uiSpanAddr);
OSFreeMem(psSyncInt);
_SyncPrimContextUnref(psContext);
}
static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
{
- if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+ if (!OSAtomicRead(&psSyncInt->hRefCount))
{
PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
}
- else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+ else if (0 == OSAtomicDecrement(&psSyncInt->hRefCount))
{
SyncPrimLocalFree(psSyncInt, IMG_FALSE);
}
{
SYNC_PRIM_BLOCK *psSyncBlock;
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);
}
(RA_PERISPAN_HANDLE *) &psSyncBlock);
PVR_GOTO_IF_ERROR(eError, fail_raalloc);
- psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
- OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
- psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
- psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+ OSAtomicWrite(&psNewSync->hRefCount, 1);
+ psNewSync->uiSpanAddr = uiSpanAddr;
+ psNewSync->psSyncBlock = psSyncBlock;
SyncPrimGetCPULinAddr(psNewSync);
*ppsSync = &psNewSync->sCommon;
_SyncPrimContextRef(psContext);
#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
/* If this is the first sync prim allocated in the context, keep a handle to it */
- if (psSyncBlock->uiSpanBase == 0 && psNewSync->u.sLocal.uiSpanAddr == 0)
+ if (psSyncBlock->uiSpanBase == 0 && psNewSync->uiSpanAddr == 0)
{
psContext->hFirstSyncPrim = psNewSync;
}
/* record this sync */
eError = BridgeSyncRecordAdd(
GetBridgeHandle(psSyncBlock->psContext->hDevConnection),
- &psNewSync->u.sLocal.hRecord,
+ &psNewSync->hRecord,
psSyncBlock->hServerSyncPrimBlock,
psSyncBlock->ui32FirmwareAddr,
SyncPrimGetOffset(psNewSync),
__func__,
szClassName,
PVRSRVGETERRORSTRING(eError)));
- psNewSync->u.sLocal.hRecord = NULL;
+ psNewSync->hRecord = NULL;
}
}
else
{
PVRSRV_ERROR eError;
- if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
- {
- SYNC_PRIM_BLOCK *psSyncBlock;
- SYNC_PRIM_CONTEXT *psContext;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
- psContext = psSyncBlock->psContext;
+ psSyncBlock = psSyncInt->psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection),
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+ ui32Value);
- eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection),
- psSyncBlock->hServerSyncPrimBlock,
- SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
- ui32Value);
- }
- else
- {
- /* Server sync not supported, attempted use of server sync */
- return PVRSRV_ERROR_NOT_SUPPORTED;
- }
return eError;
}
PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
- {
- SyncPrimLocalUnref(psSyncInt);
- }
- else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
- {
- /* Server sync not supported, attempted use of server sync */
- return PVRSRV_ERROR_NOT_SUPPORTED;
- }
- else
- {
- /*
- Either the client has given us a bad pointer or there is an
- error in this module
- */
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
- }
+
+ SyncPrimLocalUnref(psSyncInt);
+
err_out:
return eError;
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- /* There is no check for the psSyncInt to be LOCAL as this call
- substitutes the Firmware updating a sync and that sync could
- be a server one */
-
eError = _SyncPrimSetValue(psSyncInt, ui32Value);
err_out:
PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
- {
- /* Invalid sync type */
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
- }
eError = _SyncPrimSetValue(psSyncInt, ui32Value);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL))
- {
- *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
- *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
- }
- else
- {
- PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
- __func__, psSyncInt->eType));
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out);
- }
+ *phBlock = psSyncInt->psSyncBlock->hServerSyncPrimBlock;
+ *pui32Offset = psSyncInt->uiSpanAddr - psSyncInt->psSyncBlock->uiSpanBase;
err_out:
return eError;
PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
- {
- *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
- }
- else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
- {
- /* Server sync not supported, attempted use of server sync */
- return PVRSRV_ERROR_NOT_SUPPORTED;
- }
- else
- {
- /* Either the client has given us a bad pointer or there is an
- * error in this module
- */
- PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out);
- }
+ *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
err_out:
return eError;
PVR_ASSERT(psSync != NULL);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
- {
- /* Invalid sync type */
- PVR_ASSERT(IMG_FALSE);
- return;
- }
-
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
psContext = psSyncBlock->psContext;
eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection),
PVR_ASSERT(psSync != NULL);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
- {
- /* Invalid sync type */
- PVR_ASSERT(IMG_FALSE);
- return;
- }
-
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
psContext = psSyncBlock->psContext;
eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection),
PVR_ASSERT(psSync != NULL);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
- {
- /* Invalid sync type */
- PVR_ASSERT(IMG_FALSE);
- return;
- }
-
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
psContext = psSyncBlock->psContext;
eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection),
PVR_ASSERT(psSync != NULL);
psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
- if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
- {
- /* Invalid sync type */
- PVR_ASSERT(IMG_FALSE);
- return;
- }
-
- psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psSyncBlock = psSyncInt->psSyncBlock;
psContext = psSyncBlock->psContext;
#if defined(__linux__) && defined(__i386__)
if (eError != PVRSRV_OK)
{
/* Mask reporting of the errors seen under normal operation */
- if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
- (eError != PVRSRV_ERROR_TIMEOUT) &&
+ if ((eError != PVRSRV_ERROR_TIMEOUT) &&
(eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED))
{
PVR_LOG_ERROR(eError, "BridgeTLAcquireData");
#include "uniq_key_splay_tree.h"
/**
- * This function performs a simple top down splay
+ * PVRSRVSplay - perform a simple top down splay
+ * @ui32Flags: flags that must splayed to the root (if possible)
+ * @psTree: psTree The tree to splay.
*
- * @param uiFlags the flags that must splayed to the root (if possible).
- * @param psTree The tree to splay.
- * @return the resulting tree after the splay operation.
+ * Return the resulting tree after the splay operation.
*/
IMG_INTERNAL
IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
/**
- * This function inserts a node into the Tree (unless it is already present, in
+ * PVRSRVInsert - insert a node into the Tree (unless it is already present, in
* which case it is equivalent to performing only a splay operation
+ * @ui32Flags: the key of the new node
+ * @psTree: tree into which one wants to add a new node
*
- * @param uiFlags the key of the new node
- * @param psTree The tree into which one wants to add a new node
- * @return The resulting with the node in it
+ * Return the resulting tree after the splay operation.
*/
IMG_INTERNAL
IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
/**
- * Deletes a node from the tree (unless it is not there, in which case it is
- * equivalent to a splay operation)
+ * PVRSRVDelete - delete a node from the tree (unless it is not there, in which
+ * case it is equivalent to a splay operation)
+ * @ui32Flags: value of the node to remove
+ * @psTree: tree into which the node must be removed
*
- * @param uiFlags the value of the node to remove
- * @param psTree the tree into which the node must be removed
- * @return the resulting tree
+ * Return the resulting tree.
*/
IMG_INTERNAL
IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
}
/**
- * This function picks up the appropriate node for the given flags
+ * PVRSRVFindNode - pick up the appropriate node for the given flags
+ * @ui32Flags: flags that must associated with the node
+ * @psTree: current splay tree node
*
- * @param uiFlags the flags that must associated with the node.
- * @param psTree current splay tree node.
- * @return the resulting tree node after the search operation.
+ * Return the resulting tree node after the search operation.
*/
IMG_INTERNAL
IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree)
* x........... FBCDC_V3_1_USED
* *............ PVRSRV_SYSTEM_DMA
* x............ PVRSRV_SYSTEM_DMA_USED
+ * *............. TFBC_LOSSY_GROUP
+ * x............. TFBC_LOSSY_GROUP_1
* |...|...|...|...|...|...|...|...
*****************************************************************************/
#define PVRSRV_SYSTEM_DMA_SHIFT (12)
#define PVRSRV_SYSTEM_DMA_USED (1U << PVRSRV_SYSTEM_DMA_SHIFT)
+/* Flag to be passed over the bridge during connection stating whether GPU supports TFBC and is
+ configured to use lossy compression control group 1 (25% / 37.5% / 50%) */
+#define PVRSRV_TFBC_LOSSY_GROUP_SHIFT (13)
+#define PVRSRV_TFBC_LOSSY_GROUP_1 (1U << PVRSRV_TFBC_LOSSY_GROUP_SHIFT)
+
static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection)
{
#if defined(__KERNEL__)
typedef IMG_UINT32 DEVMEM_HEAPCFGID;
#define DEVMEM_HEAPCFG_FORCLIENTS 0
-#define DEVMEM_HEAPCFG_META 1
+#define DEVMEM_HEAPCFG_FORFW 1
/*
typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
-/*************************************************************************/ /*!
-@Function DevmemUnpin
-@Description This is the counterpart to DevmemPin(). It is meant to be
- called before repinning an allocation.
-
- For a detailed description see client API documentation.
-
-@Input phMemDesc The MemDesc that is going to be unpinned.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
- registered to be reclaimed. Error otherwise.
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
-
-/*************************************************************************/ /*!
-@Function DevmemPin
-@Description This is the counterpart to DevmemUnpin(). It is meant to be
- called after unpinning an allocation.
-
- For a detailed description see client API documentation.
-
-@Input phMemDesc The MemDesc that is going to be pinned.
-
-@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
- was successfully restored.
-
- PVRSRV_ERROR_PMR_NEW_MEMORY when the content
- could not be restored and new physical memory
- was allocated.
-
- A different error otherwise.
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-DevmemPin(DEVMEM_MEMDESC *psMemDesc);
IMG_INTERNAL PVRSRV_ERROR
DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
func takes a copy if it needs it. */
const IMG_CHAR *pszName,
DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ IMG_UINT32 uiHeapIndex,
DEVMEM_HEAP **ppsHeapPtr);
/*
* DevmemDestroyHeap()
PVRSRV_ERROR
DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
IMG_DEVMEM_SIZE_T uiSize,
- IMG_DEVMEM_SIZE_T uiChunkSize,
IMG_UINT32 ui32NumPhysChunks,
IMG_UINT32 ui32NumVirtChunks,
IMG_UINT32 *pui32MappingTable,
DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
IMG_DEV_VIRTADDR *psFaultAddress);
-IMG_INTERNAL PVRSRV_ERROR
-DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc,
- IMG_DEV_VIRTADDR sDevVAddr,
- IMG_DEVMEM_SIZE_T uiSize,
- IMG_BOOL bInvalidate);
-
IMG_INTERNAL PVRSRV_ERROR
DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext,
IMG_UINT64 ui64FBSCEntries);
@Input psContext Memory context the process that would like to
be notified about.
-@Input ui32PID The PID of the calling process.
@Input bRegister If true, register. If false, de-register.
@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
error code
*/ /**************************************************************************/
IMG_INTERNAL PVRSRV_ERROR
RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
- IMG_UINT32 ui32PID,
IMG_BOOL bRegister);
/*************************************************************************/ /*!
#define DEVMEM_HEAPNAME_MAXLENGTH 160
-/*
- * Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY,
- * this check is validated in the DDK. Note this is only reserving "Virtual Address" space and
- * physical allocations (and mappings thereon) should only be done as much as required (to avoid
- * wastage).
- * Granularity has been chosen to support the max possible practically used OS page size.
- */
-#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */
-
/*
* VA heap size should be at least OS page size. This check is validated in the DDK.
*/
IMG_HANDLE hDevMemServerHeap;
/* This heap is fully allocated and premapped into the device address space.
- * Used in virtualisation for firmware heaps of Guest and optionally Host drivers. */
+ * Used in virtualisation for firmware heaps of Guest and optionally Host
+ * drivers. */
IMG_BOOL bPremapped;
};
#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */
#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */
#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */
-#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently pinned? */
#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */
#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */
#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */
#define DEVMEM_PROPERTIES_NO_CPU_MAPPING (1UL<<8) /* No CPU Mapping is allowed, RW attributes
are further derived from allocation memory flags */
#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE (1UL<<9) /* No sparse resizing allowed, once a memory
- layout is chosen, no change allowed later,
- This includes pinning and unpinning */
+ layout is chosen, no change allowed later */
typedef struct DEVMEM_DEVICE_IMPORT_TAG
PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */
IMG_HANDLE hPMR; /*!< Handle to the PMR */
DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */
- DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */
+ SHARED_DEV_CONNECTION hConnection; /*!< Services connection for the server */
void *pvUserData; /*!< User data */
};
@Input uiMapFlags
@Input hPMR Reference to the PMR of this import struct.
@Input uiProperties Properties of the import. Is it exportable,
- imported, suballocatable, unpinned?
+ imported, suballocatable?
******************************************************************************/
void DevmemImportStructInit(DEVMEM_IMPORT *psImport,
IMG_DEVMEM_SIZE_T uiSize,
{
DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport);
- if (uiProperties &
- (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE))
- {
#if defined(SUPPORT_SECURITY_VALIDATION)
- if (uiProperties & DEVMEM_PROPERTIES_SECURE)
- {
- PVR_DPF((PVR_DBG_WARNING,
+ if (uiProperties & DEVMEM_PROPERTIES_SECURE)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
"%s: Allocation is a secure buffer. "
"It should not be possible to map to CPU, but for security "
"validation this will be allowed for testing purposes, "
"as long as the buffer is pinned.",
__func__));
- }
-
- if (uiProperties & DEVMEM_PROPERTIES_UNPINNED)
-#endif
- {
- PVR_DPF((PVR_DBG_ERROR,
- "%s: Allocation is currently unpinned or a secure buffer. "
- "Not possible to map to CPU!",
- __func__));
- return PVRSRV_ERROR_INVALID_MAP_REQUEST;
- }
}
+#endif
if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING)
{
IMG_UINT32 uiNumPages,
IMG_UINT32 uiLog2PageSize,
PVRSRV_MEMALLOCFLAGS_T uiFlags,
- IMG_HANDLE hBridge);
+ SHARED_DEV_CONNECTION hConnection);
void
DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc);
#include "htbuffer_types.h"
#include "htbuffer_init.h"
-#if defined(__KERNEL__)
-#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0)
-
-/* Host Trace Buffer name */
-#define HTB_STREAM_NAME "PVRHTBuffer"
-
-#else
-#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
-#endif
-
-/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
-#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
-#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
-
-/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
-#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
-#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff))
-
-/*************************************************************************/ /*!
- @Function HTBLog
- @Description Record a Host Trace Buffer log event
-
- @Input PID The PID of the process the event is associated
- with. This is provided as an argument rather
- than querying internally so that events associated
- with a particular process, but performed by
- another can be logged correctly.
-
- @Input TID The TID (Thread ID) of the thread the event is
- associated with.
-
- @Input TimeStampus The timestamp in us for this event
-
- @Input SF The log event ID
-
- @Input ... Log parameters
-
- @Return PVRSRV_OK Success.
-
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...);
-
-
/*************************************************************************/ /*!
- @Function HTBLogSimple
- @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
-
- @Input SF The log event ID
-
- @Input ... Log parameters
-
- @Return PVRSRV_OK Success.
-
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+ @Input hSrvHandle Server Handle
+ @Input ui32NumFlagGroups Number of group enable flags words
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+ @Input ui32LogLevel Log level to record
+ @Input ui32EnablePID PID to enable logging for a specific process
+ @Input eLogPidMode Enable logging for all or specific processes,
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+ @Return eError Internal services call returned eError error
+ number
*/ /**************************************************************************/
IMG_INTERNAL PVRSRV_ERROR
-HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
-
-
-
-/* DEBUG log group enable */
-#if !defined(HTB_DEBUG_LOG_GROUP)
-#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */
-#define HTB_LOG_TYPE_DBG __BUILDERROR__
-#endif
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogPidMode,
+ HTB_OPMODE_CTRL eOpMode);
#if defined(__cplusplus)
#include "img_types.h"
#include "img_defs.h"
-/*************************************************************************/ /*!
- @Function HTBConfigure
- @Description Configure the Host Trace Buffer.
- Once these parameters are set they may not be changed
-
- @Input hSrvHandle Server Handle
-
- @Input pszBufferName Name to use for the TL buffer, this will be
- required to request trace data from the TL
-
- @Input ui32BufferSize Requested TL buffer size in bytes
-
- @Return eError Internal services call returned eError error
- number
-*/ /**************************************************************************/
-IMG_INTERNAL PVRSRV_ERROR
-HTBConfigure(
- IMG_HANDLE hSrvHandle,
- IMG_CHAR * pszBufferName,
- IMG_UINT32 ui32BufferSize
-);
-
/*************************************************************************/ /*!
@Function HTBControl
@Description Update the configuration of the Host Trace Buffer
/* X-Macro for Process stat keys */
#define PVRSRV_PROCESS_STAT_KEY \
- X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \
- X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
- X(PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \
- X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
- X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
- X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
- X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
- X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
- X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \
X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \
X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \
- X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") \
- X(PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \
- X(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \
- X(PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddress")
-
+ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax")
+
+/* X-Macro for Device stat keys */
+#define PVRSRV_DEVICE_STAT_KEY \
+ X(PVRSRV_DEVICE_STAT_TYPE_CONNECTIONS, "Connections") \
+ X(PVRSRV_DEVICE_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
+ X(PVRSRV_DEVICE_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \
+ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
+ X(PVRSRV_DEVICE_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
+ X(PVRSRV_DEVICE_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
+ X(PVRSRV_DEVICE_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \
+ X(PVRSRV_DEVICE_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \
+ X(PVRSRV_DEVICE_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddr")
/* X-Macro for Driver stat keys */
#define PVRSRV_DRIVER_STAT_KEY \
PVRSRV_PROCESS_STAT_TYPE_COUNT
}PVRSRV_PROCESS_STAT_TYPE;
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+ PVRSRV_DEVICE_STAT_KEY
+#undef X
+ PVRSRV_DEVICE_STAT_TYPE_COUNT
+}PVRSRV_DEVICE_STAT_TYPE;
+
typedef enum {
#define X(stat_type, stat_str) stat_type,
PVRSRV_DRIVER_STAT_KEY
PVRSRV_DRIVER_STAT_TYPE_COUNT
}PVRSRV_DRIVER_STAT_TYPE;
-extern const IMG_CHAR *const pszProcessStatType[];
-
-extern const IMG_CHAR *const pszDriverStatType[];
-
#endif // PROC_STATS_H
typedef IMG_UINT64 RA_BASE_T;
typedef IMG_UINT32 RA_LOG2QUANTUM_T;
typedef IMG_UINT64 RA_LENGTH_T;
+typedef IMG_UINT32 RA_POLICY_T;
+
+typedef struct _RA_BASE_MULTI_ RA_BASE_MULTI_T;
+
+typedef IMG_UINT32 RA_BASE_ARRAY_SIZE_T;
+
+
+/*
+ * RA_BASE_ARRAY can represent a number of bases of which are packed,
+ * that is, they can be one of two types, a Real Base or a Ghost base.
+ * A Real Base is a base that has been created by the RA and is used to
+ * represent an allocated region, it has an entry in the RA Hash table and
+ * as such has a BT associated with it.
+ * A Ghost base is a fabricated base address generated at chunk boundaries
+ * given by the caller. These are used to divide a RealBase into
+ * arbitrary regions that the caller requires e.g. 4k pages. Ghost bases don't
+ * exist from the RA memory tracking point of view but they do exist and are treated
+ * as base addresses from the PoV of the caller. This allows the RA to allocate in
+ * largest possible lengths meaning fewer alloc calls whilst allowing the chunk
+ * flexibility for callers. Ghost refers to the concept that they
+ * don't exist in this RA internals context but do in the callers (LMA) context i.e.
+ * they appear Real from another perspective but we the RA know they are a ghost of the
+ * Real Base.
+ */
+#if defined(__GNUC__) && GCC_VERSION_AT_LEAST(9, 0)
+/* Use C99 dynamic arrays, older compilers do not support this. */
+typedef RA_BASE_T RA_BASE_ARRAY_T[];
+#else
+/* Variable length array work around, will contain at least 1 element.
+ * Causes errors on newer compilers, in which case use dynamic arrays (see above).
+ */
+#define RA_FLEX_ARRAY_ONE_OR_MORE_ELEMENTS 1U
+typedef RA_BASE_T RA_BASE_ARRAY_T[RA_FLEX_ARRAY_ONE_OR_MORE_ELEMENTS];
+#endif
+
+/* Since 0x0 is a valid BaseAddr, we rely on max 64-bit value to be an invalid
+ * page address.
+ */
+#define INVALID_BASE_ADDR (IMG_UINT64_MAX)
+/* Used to check for duplicated alloc indices in sparse alloc path
+ * prior to attempting allocations */
+#define RA_BASE_SPARSE_PREP_ALLOC_ADDR (IMG_UINT64_MAX - 1)
+#define RA_BASE_FLAGS_MASK 0xFFF /* 12 Bits 4k alignment. */
+#define RA_BASE_FLAGS_LOG2 12
+#define RA_BASE_CHUNK_LOG2_MAX 64
+#define RA_BASE_GHOST_BIT (1ULL << 0)
+#define RA_BASE_STRIP_GHOST_BIT(uiBase) ((uiBase) & ~(RA_BASE_GHOST_BIT))
+#define RA_BASE_SET_GHOST_BIT(uiBase) ((uiBase) |= RA_BASE_GHOST_BIT)
+#define RA_BASE_IS_GHOST(uiBase) (BITMASK_HAS((uiBase), RA_BASE_GHOST_BIT) && (uiBase) != INVALID_BASE_ADDR)
+#define RA_BASE_IS_REAL(uiBase) (!BITMASK_HAS((uiBase), RA_BASE_GHOST_BIT))
+#define RA_BASE_IS_SPARSE_PREP(uiBase) ((uiBase) == RA_BASE_SPARSE_PREP_ALLOC_ADDR)
+#define RA_BASE_IS_INVALID(uiBase) ((uiBase) == INVALID_BASE_ADDR)
+
+typedef struct _RA_MULTIBASE_ITERATOR_ RA_MULTIBASE_ITERATOR;
/* Lock classes: describes the level of nesting between different arenas. */
#define RA_LOCKCLASS_0 0
* */
/* --- Resource allocation policy definitions ---
-* | 31.........4|......3....|........2.............|1...................0|
-* | Reserved | No split | Area bucket selection| Alloc node selection|
+* | 31.........5|.......4....|......3....|........2.............|1...................0|
+* | Reserved | Non-Contig | No split | Area bucket selection| Alloc node selection|
*/
/*
#define RA_POLICY_NO_SPLIT (8U)
#define RA_POLICY_NO_SPLIT_MASK (8U)
+/* This flag is used in physmem_lma only. it is used to decide if we should
+ * activate the non-contiguous allocation feature of RA MultiAlloc.
+ * Requirements for activation are that the OS implements the
+ * OSMapPageArrayToKernelVA function in osfunc which allows for mapping
+ * physically sparse pages as a virtually contiguous range.
+ * */
+#define RA_POLICY_ALLOC_ALLOW_NONCONTIG (16U)
+#define RA_POLICY_ALLOC_ALLOW_NONCONTIG_MASK (16U)
+
/*
* Default Arena Policy
* */
@Input RA_PERARENA_HANDLE RA handle
@Input RA_LENGTH_T Request size
@Input RA_FLAGS_T RA flags
+@Input RA_LENGTH_T Base Alignment
@Input IMG_CHAR Annotation
@Input RA_BASE_T Allocation base
@Input RA_LENGTH_T Actual size
typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE,
RA_LENGTH_T,
RA_FLAGS_T,
+ RA_LENGTH_T,
const IMG_CHAR*,
RA_BASE_T*,
RA_LENGTH_T*,
* @Input imp_alloc - a resource allocation callback or 0.
* @Input imp_free - a resource de-allocation callback or 0.
* @Input per_arena_handle - private handle passed to alloc and free or 0.
- * @Input ui32PlicyFlags - Policies that govern the arena.
+ * @Input ui32PolicyFlags - Policies that govern the arena.
* @Return pointer to arena, or NULL.
*/
RA_ARENA *
PFN_RA_ALLOC imp_alloc,
PFN_RA_FREE imp_free,
RA_PERARENA_HANDLE per_arena_handle,
- IMG_UINT32 ui32PolicyFlags);
+ RA_POLICY_T ui32PolicyFlags);
/**
* @Function RA_Create_With_Span
* @Input ui64CpuBase - CPU Physical Base Address of the RA.
* @Input ui64SpanDevBase - Device Physical Base Address of the RA.
* @Input ui64SpanSize - Size of the span to add to the created RA.
+ * @Input ui32PolicyFlags - Policies that govern the arena.
* @Return pointer to arena, or NULL.
*/
RA_ARENA *
RA_LOG2QUANTUM_T uLog2Quantum,
IMG_UINT64 ui64CpuBase,
IMG_UINT64 ui64SpanDevBase,
- IMG_UINT64 ui64SpanSize);
+ IMG_UINT64 ui64SpanSize,
+ RA_POLICY_T ui32PolicyFlags);
/**
* @Function RA_Delete
RA_LENGTH_T *pActualSize,
RA_PERISPAN_HANDLE *phPriv);
+/*************************************************************************/ /*!
+@Function RA_AllocMulti
+@Description To allocate resource from an arena.
+ This method of allocation can be used to guarantee that if there
+ is enough space in the RA and the contiguity given is the
+ greatest common divisor of the contiguities used on this RA
+ the allocation can be made.
+ Allocations with contiguity less than the current GCD
+ (Greatest Common Divisor) abiding to pow2 are also guaranteed to
+ succeed. See scenario 4.
+ Allocations are not guaranteed but still reduce fragmentation
+ using this method when multiple contiguities are used e.g.
+ 4k & 16k and the current allocation has a contiguity higher than
+ the greatest common divisor used.
+ Scenarios with Log 2 contiguity examples:
+ 1. All allocations have contiguity of 4k. Allocations can be
+ guaranteed given enough RA space since the GCD is always used.
+ 2. Allocations of 4k and 16k contiguity have been previously
+ made on this RA. A new allocation of 4k contiguity is guaranteed
+ to succeed given enough RA space since the contiguity is the GCD.
+ 3. Allocations of 4k and 16k contiguity have been previously made
+ on this RA. A new allocation of 16k contiguity is not guaranteed
+ to succeed since it is not the GCD of all contiguities used.
+ 4. Contiguity 16k and 64k already exist, a 4k contiguity
+ allocation would be guaranteed to succeed but would now be the
+ new GCD. So further allocations would be required to match this
+ GCD to guarantee success.
+ This method does not suffer the same fragmentation pitfalls
+ as RA_Alloc as it constructs the allocation size from many
+ smaller constituent allocations, these are represented and returned
+ in the given array. In addition, Ghost bases are generated in
+ array entries conforming to the chunk size, this allows for
+ representing chunks of any size that work as page addrs
+ in upper levels.
+ The aforementioned array must be at least of size
+ uRequestsize / uiChunkSize, this ensures there is at least one
+ array entry per chunk required.
+ This function must have a uiChunkSize value of
+ at least 4096, this is to ensure space for the base type encoding.
+@Input pArena The arena
+@Input uRequestSize The size of resource requested.
+@Input uiLog2ChunkSize The log2 contiguity multiple of the bases i.e all
+ Real bases must be a multiple in size of this
+ size, also used to generate Ghost bases.
+ Allocations will also be aligned to this value.
+@Input uImportMultiplier Import x-times more for future requests if
+ we have to import new resource.
+@Input                  uImportFlags Flags influencing allocation policy.
+@Input pszAnnotation String to describe the allocation
+@InOut aBaseArray Array of bases to populate.
+@Input uiBaseArraySize Size of the array to populate.
+@Output bPhysContig Are the allocations made in the RA physically
+ contiguous.
+@Return PVRSRV_OK - success
+*/ /**************************************************************************/
+PVRSRV_ERROR
+RA_AllocMulti(RA_ARENA *pArena,
+ RA_LENGTH_T uRequestSize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uImportFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_BOOL *bPhysContig);
+
+/**
+ * @Function RA_AllocMultiSparse
+ *
+ * @Description To Alloc resource from an RA arena at the specified indices.
+ * This function follows the same conditions and functionality as
+ * RA_AllocMulti although with the added aspect of specifying the
+ * indices to allocate in the Base Array. This means we can still
+ * attempt to maintain contiguity where possible with the aim of
+ * reducing fragmentation and increasing occurrence of optimal free
+ * scenarios.
+ * @Input pArena The Arena
+ * @Input uiLog2ChunkSize The log2 contiguity multiple of the bases i.e all
+ * Real bases must be a multiple in size of this
+ * size, also used to generate Ghost bases.
+ * Allocations will also be aligned to this value.
+ * @Input uImportMultiplier Import x-times more for future requests if
+ * we have to import new resource.
+ * @Input        uImportFlags      Flags influencing allocation policy.
+ * @Input pszAnnotation String to describe the allocation
+ * @InOut aBaseArray Array of bases to populate.
+ * @Input uiBaseArraySize Size of the array to populate.
+ * @Input puiAllocIndices The indices into the array to alloc, if indices are NULL
+ * then we will allocate uiAllocCount chunks sequentially.
+ * @InOut uiAllocCount The number of bases to alloc from the array.
+ *
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_AllocMultiSparse(RA_ARENA *pArena,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uImportFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiAllocCount);
+/**
+ * @Function RA_FreeMulti
+ *
+ * @Description To free a multi-base resource constructed using
+ * a call to RA_AllocMulti.
+ *
+ * @Input pArena - The arena the segment was originally allocated from.
+ * @Input aBaseArray - The array to free bases from.
+ * @Input  uiBaseArraySize - Size of the array to free bases from.
+ *
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_FreeMulti(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize);
+
+/**
+ * @Function RA_FreeMultiSparse
+ *
+ * @Description To free part of a multi-base resource constructed using
+ * a call to RA_AllocMulti.
+ *
+ * @Input pArena - The arena the segment was originally allocated from.
+ * @Input aBaseArray - The array to free bases from.
+ * @Input  uiBaseArraySize - Size of the array to free bases from.
+ * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases.
+ * @Input puiFreeIndices - The indices into the array to free.
+ * @InOut puiFreeCount - The number of bases to free from the array, becomes the number
+ * of bases actually free'd. The in value may differ from the out
+ * value in cases of error when freeing. The out value can then be
+ * used in upper levels to keep any mem tracking structures consistent
+ * with what was actually freed before the error occurred.
+ *
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_FreeMultiSparse(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiFreeIndices,
+ IMG_UINT32 *puiFreeCount);
+
/**
* @Function RA_Alloc_Range
*
void
RA_Free(RA_ARENA *pArena, RA_BASE_T base);
+/**
+ * @Function RA_SwapSparseMem
+ *
+ * @Description Swaps chunk sized allocations at X<->Y indices.
+ * The function is most optimal when Indices are provided
+ * in ascending order, this allows the internals to optimally
+ * swap based on contiguity and reduces the amount of ghost to
+ * real conversion performed. Note this function can also be used
+ * to move pages, in this case, we effectively swap real allocations
+ * with invalid marked bases.
+ * @Input pArena - The arena.
+ * @InOut aBaseArray - The array to Swap bases in.
+ * @Input  uiBaseArraySize - Size of the array to Swap bases in.
+ * @Input uiLog2ChunkSize - The log2 chunk size used to generate the Ghost bases
+ * and size the Real chunks.
+ * @Input puiXIndices - Set of X indices to swap with parallel indices in Y.
+ * @Input puiYIndices - Set of Y indices to swap with parallel indices in X.
+ * @Input uiSwapCount - Number of indices to swap.
+ *
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_SwapSparseMem(RA_ARENA *pArena,
+ RA_BASE_ARRAY_T aBaseArray,
+ RA_BASE_ARRAY_SIZE_T uiBaseArraySize,
+ IMG_UINT32 uiLog2ChunkSize,
+ IMG_UINT32 *puiXIndices,
+ IMG_UINT32 *puiYIndices,
+ IMG_UINT32 uiSwapCount);
+
/**
* @Function RA_Get_Usage_Stats
*
IMG_INTERNAL void
RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats);
+/**
+ * @Function RA_GetArenaName
+ *
+ * @Description To obtain the arena name.
+ *
+ * @Input pArena - the arena to acquire the name from.
+ *
+ * @Return IMG_CHAR* Arena name.
+ */
+IMG_INTERNAL IMG_CHAR *
+RA_GetArenaName(RA_ARENA *pArena);
+
IMG_INTERNAL RA_ARENA_ITERATOR *
RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments);
DLLIST_NODE sListNode; /*!< List node for the sync block list */
} SYNC_PRIM_BLOCK;
-typedef enum SYNC_PRIM_TYPE_TAG
-{
- SYNC_PRIM_TYPE_UNKNOWN = 0,
- SYNC_PRIM_TYPE_LOCAL,
- SYNC_PRIM_TYPE_SERVER,
-} SYNC_PRIM_TYPE;
-
-typedef struct SYNC_PRIM_LOCAL_TAG
+typedef struct SYNC_PRIM_TAG
{
- ATOMIC_T hRefCount; /*!< Ref count for this sync */
+ PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */
+ ATOMIC_T hRefCount; /*!< Ref count for this sync */
SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */
IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
IMG_HANDLE hRecord; /*!< Sync record handle */
-} SYNC_PRIM_LOCAL;
-
-typedef struct SYNC_PRIM_TAG
-{
- PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */
- SYNC_PRIM_TYPE eType; /*!< Sync primitive type */
- union {
- SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */
- } u;
} SYNC_PRIM;
#include "pvrsrv_device.h"
#include "syscommon.h"
#include "pvr_debug.h"
+#include "oskm_apphint.h"
void SysRGXErrorNotify(IMG_HANDLE hSysData,
PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData)
#endif /* PVRSRV_NEED_PVR_DPF */
}
+/* SysRestrictGpuLocalPhysheap:
+ * Apply the RestrictGpuLocalPhysHeapSizeMB apphint (SUPPORT_VALIDATION builds
+ * only). Returns the restricted GPU_LOCAL heap size in bytes, or the original
+ * uiHeapSize when the apphint is unset or requests more than is available. */
+IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize)
+{
+#if defined(SUPPORT_VALIDATION)
+	void *pvAppHintState = NULL;
+	IMG_UINT32 uiCurrentHeapSizeMB = B2MB(uiHeapSize);
+	IMG_UINT32 uiForcedHeapSizeMB = 0;
+	IMG_UINT64 uiForcedHeapSizeBytes = 0;
+
+	/* Query the apphint; uiForcedHeapSizeMB stays 0 if it was not set. */
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
+	                     RestrictGpuLocalPhysHeapSizeMB, &uiCurrentHeapSizeMB,
+	                     &uiForcedHeapSizeMB);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	uiForcedHeapSizeBytes = MB2B((IMG_UINT64)uiForcedHeapSizeMB);
+
+	if (uiForcedHeapSizeMB == 0)
+	{
+		/* Apphint wasn't set, just return current heapsize */
+		return uiHeapSize;
+	}
+
+	if (uiForcedHeapSizeBytes > uiHeapSize)
+	{
+		PVR_DPF((PVR_DBG_WARNING,"GPU_LOCAL Forced heap value greater than possible heap size. "
+				"Given: %llu Available: %llu. Reverting to default.",
+				uiForcedHeapSizeBytes, uiHeapSize));
+		/* The restriction cannot be honoured: actually revert to the
+		 * syslayer-detected size, as the warning above states.
+		 * (Previously the oversized forced value was returned here.) */
+		return uiHeapSize;
+	}
+
+	PVR_LOG(("RestrictGpuLocalPhysHeapSizeMB applied GPU_LOCAL Size Bytes: %llu", uiForcedHeapSizeBytes));
+
+	return uiForcedHeapSizeBytes;
+#else
+	return uiHeapSize;
+#endif
+}
+
+/* SysRestrictGpuLocalAddPrivateHeap:
+ * Returns IMG_TRUE when the RestrictGpuLocalPhysHeapSizeMB apphint has been
+ * set to a non-zero value (SUPPORT_VALIDATION builds only), IMG_FALSE
+ * otherwise. Presumably used by the syslayer to decide whether a private
+ * heap should be added alongside the restricted GPU_LOCAL heap — confirm
+ * against the caller. */
+IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void)
+{
+#if defined(SUPPORT_VALIDATION)
+	void *pvAppHintState = NULL;
+	IMG_UINT32 uiCurrentHeapSizeMB = 0;
+	IMG_UINT32 uiForcedHeapSizeMB = 0;
+
+	/* Query the apphint; uiForcedHeapSizeMB stays 0 if it was not set. */
+	OSCreateKMAppHintState(&pvAppHintState);
+	OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState,
+	                     RestrictGpuLocalPhysHeapSizeMB, &uiCurrentHeapSizeMB,
+	                     &uiForcedHeapSizeMB);
+	OSFreeKMAppHintState(pvAppHintState);
+
+	return uiForcedHeapSizeMB ? IMG_TRUE : IMG_FALSE;
+#else
+	return IMG_FALSE;
+#endif
+}
+
/******************************************************************************
End of file (sysconfig_cmn.c)
******************************************************************************/
void SysRGXErrorNotify(IMG_HANDLE hSysData,
PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData);
+/**************************************************************************/ /*!
+@Function SysRestrictGpuLocalPhysheap
+@Description If the Restriction apphint has been set, validate the
+ restriction value and return the new GPU_LOCAL heap size.
+
+@Input uiHeapSize Current syslayer detected GPU_LOCAL heap size.
+@Return IMG_UINT64 New GPU_LOCAL heap size in bytes.
+*/ /***************************************************************************/
+IMG_UINT64 SysRestrictGpuLocalPhysheap(IMG_UINT64 uiHeapSize);
+
+/**************************************************************************/ /*!
+@Function SysRestrictGpuLocalAddPrivateHeap
+@Description Determine if the restriction apphint has been set.
+
+@Return IMG_BOOL IMG_TRUE if the restriction apphint has been
+ set.
+*/ /***************************************************************************/
+IMG_BOOL SysRestrictGpuLocalAddPrivateHeap(void);
+
#endif /* !defined(SYSCOMMON_H) */
#include "rgxdefs_km.h"
#include "virt_validation_defs.h"
-void SysInitVirtInitialization(IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
- IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+void SysInitVirtInitialization(IMG_HANDLE hSysData,
+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
-#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
-void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
-void SysSetTrustedDeviceAceEnabled(void);
+#if defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_HANDLE hSysData, IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(IMG_HANDLE hSysData);
#endif
#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += \
+ services/system/$(PVR_SYSTEM)/sysconfig.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/rogue/common/vmm_type_$(VMM_TYPE).o
+
+ccflags-y += \
+ -I$(TOP)/services/system/rogue/common/env/linux
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_memallocflags.h"
+#include "syscommon.h"
+#include "power.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#include "pci_support.h"
+#include "interrupt_support.h"
+#include "plato_drv.h"
+#include <linux/dma-mapping.h>
+
+#define PLATO_HAS_NON_MAPPABLE(sys) (sys->pdata->has_nonmappable == true)
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+#define PHYS_HEAP_ID_CPU_LOCAL 0
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+#define PHYS_HEAP_ID_GPU_LOCAL 0
+#define PHYS_HEAP_ID_CPU_LOCAL 1
+#if defined(SUPPORT_PLATO_DISPLAY)
+#define PHYS_HEAP_ID_PDP_LOCAL 2
+#define PHYS_HEAP_ID_NON_MAPPABLE 3
+#else
+#define PHYS_HEAP_ID_NON_MAPPABLE 2
+#endif
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+#define PHYS_HEAP_ID_GPU_LOCAL 0
+#if defined(SUPPORT_PLATO_DISPLAY)
+#define PHYS_HEAP_ID_PDP_LOCAL 1
+#define PHYS_HEAP_ID_NON_MAPPABLE 2
+#else
+#define PHYS_HEAP_ID_NON_MAPPABLE 1
+#endif
+#endif
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+static_assert(PHYS_HEAP_ID_PDP_LOCAL == PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID, "PDP heap ID mismatch.");
+#endif
+
+/* NOC (network-on-chip) debug registers dumped by SysDebugInfo(). The third
+ * field caches the value read back by plato_debug_info(). */
+static struct plato_debug_register plato_noc_regs[] = {
+	{"NOC Offset 0x00", 0x00, 0},
+	{"NOC Offset 0x04", 0x04, 0},
+	{"NOC Offset 0x08", 0x08, 0},
+	{"NOC Offset 0x0C", 0x0C, 0},
+	{"NOC Offset 0x10", 0x10, 0},
+	{"NOC Offset 0x14", 0x14, 0},
+	{"NOC Offset 0x18", 0x18, 0},
+	{"NOC Offset 0x1C", 0x1C, 0},
+	{"NOC Offset 0x50", 0x50, 0},
+	{"NOC Offset 0x54", 0x54, 0},
+	{"NOC Offset 0x58", 0x58, 0},
+	{"DDR A Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET, 0},
+	{"DDR A Data", SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET, 0},
+	{"DDR A Publ", SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET, 0},
+	{"DDR B Ctrl", SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET, 0},
+	{"DDR B Data", SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET, 0},
+	{"DDR B Publ", SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET, 0},
+	{"Display S", SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET, 0},
+	{"GPIO 0 S", SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET, 0},
+	{"GPIO 1 S", SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET, 0},
+	{"GPU S", SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET, 0},
+	{"PCI PHY", SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET, 0},
+	{"PCI Reg", SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET, 0},
+	{"PCI S", SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET, 0},
+	{"Periph S", SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET, 0},
+	{"Ret Reg", SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET, 0},
+	{"Service", SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET, 0},
+};
+
+/* Always-on (AON) power-domain debug registers, also dumped by
+ * SysDebugInfo(); values are filled in by plato_debug_info(). */
+static struct plato_debug_register plato_aon_regs[] = {
+	{"AON Offset 0x0000", 0x0000, 0},
+	{"AON Offset 0x0070", 0x0070, 0},
+};
+
+/* Per-device system-layer state: the Linux platform device created by the
+ * plato base driver, its Rogue register resource and the platform data the
+ * base driver attached to it. */
+typedef struct _SYS_DATA_ {
+	struct platform_device *pdev;       /* Plato Rogue platform device */
+	struct resource *registers;         /* Rogue register MMIO resource */
+	struct plato_rogue_platform_data *pdata; /* heap layout from base driver */
+} SYS_DATA;
+
+/* State for one installed device LISR: the parent device the interrupt is
+ * registered against, the Plato interrupt ID, and the services callback
+ * plus its opaque context. */
+typedef struct {
+	struct device *psDev;     /* parent (PCI) device owning the IRQ */
+	int iInterruptID;         /* PLATO_INTERRUPT_* identifier */
+	void *pvData;             /* context passed back to pfnLISR */
+	PFN_LISR pfnLISR;         /* services low-level ISR */
+} LISR_DATA;
+
+/* Return a heap-allocated device version string, or NULL when none is
+ * available. Plato does not expose one, so this always returns NULL;
+ * DeviceConfigDestroy() only frees pszVersion when it is non-NULL. */
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+	return NULL;
+}
+
+
+/* Dump the Plato NOC and AON debug registers through the supplied debug
+ * printf (consumed via the PVR_DUMPDEBUG_LOG macro). plato_debug_info()
+ * refreshes the 'value' fields of the static register tables above; each
+ * entry is then printed. Returns PVRSRV_ERROR_INVALID_PARAMS if the base
+ * driver cannot read the registers. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+						DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+						void *pvDumpDebugFile)
+{
+	SYS_DATA *psSysData = psDevConfig->hSysData;
+	IMG_UINT32 i = 0;
+
+	PVR_DUMPDEBUG_LOG("------[ Plato System Debug ]------");
+
+	if (plato_debug_info(psSysData->pdev->dev.parent, &plato_noc_regs[0], &plato_aon_regs[0]))
+		return PVRSRV_ERROR_INVALID_PARAMS;
+
+	for (i = 0; i < ARRAY_SIZE(plato_noc_regs); i++)
+		PVR_DUMPDEBUG_LOG("%s: 0x%x", plato_noc_regs[i].description, plato_noc_regs[i].value);
+
+	for (i = 0; i < ARRAY_SIZE(plato_aon_regs); i++)
+		PVR_DUMPDEBUG_LOG("%s: 0x%x", plato_aon_regs[i].description, plato_aon_regs[i].value);
+
+	return PVRSRV_OK;
+}
+
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+
+/* Populate the card-local (LMA) physical heap configs from the plato
+ * platform data:
+ *  - the CPU-mappable GPU_LOCAL heap (always),
+ *  - a GPU_PRIVATE non-mappable heap when the PCI BAR cannot cover all of
+ *    the card memory (PLATO_HAS_NON_MAPPABLE),
+ *  - the PDP display heap when SUPPORT_PLATO_DISPLAY is built in.
+ *
+ * psSysData          System data holding the plato platform data.
+ * pasPhysHeaps       Heap config array to fill, indexed by PHYS_HEAP_ID_*.
+ * uiPhysHeapCount    Size of pasPhysHeaps (not used by this function).
+ * hPhysHeapPrivData  Stored as each heap's hPrivData; this is the device
+ *                    config, used by the Plato*PAddrTo*PAddr translators.
+ */
+static PVRSRV_ERROR InitLocalHeaps(SYS_DATA *psSysData,
+								PHYS_HEAP_CONFIG *pasPhysHeaps,
+								IMG_UINT32 uiPhysHeapCount,
+								IMG_HANDLE hPhysHeapPrivData)
+{
+	PHYS_HEAP_CONFIG *psPhysHeap;
+
+	psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL];
+	psPhysHeap->eType = PHYS_HEAP_TYPE_LMA;
+	psPhysHeap->pszPDumpMemspaceName = "LMA";
+	psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs;
+	psPhysHeap->hPrivData = hPhysHeapPrivData;
+	psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+
+	/* Configure mappable heap region */
+	psPhysHeap->sStartAddr.uiAddr = psSysData->pdata->rogue_heap_mappable.base;
+	psPhysHeap->sCardBase.uiAddr = psSysData->pdata->rogue_heap_dev_addr;
+	psPhysHeap->uiSize = psSysData->pdata->rogue_heap_mappable.size;
+
+	PVR_LOG(("Added mappable local memory heap. Base = 0x%016llx, Size=0x%016llx",
+			psPhysHeap->sCardBase.uiAddr,
+			psPhysHeap->uiSize));
+
+	/* Setup non-mappable region if BAR size is less than actual memory size (8GB) */
+	if (PLATO_HAS_NON_MAPPABLE(psSysData))
+	{
+		psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_NON_MAPPABLE];
+		psPhysHeap->eType = PHYS_HEAP_TYPE_LMA;
+		psPhysHeap->pszPDumpMemspaceName = "LMA";
+		psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs;
+		psPhysHeap->hPrivData = hPhysHeapPrivData;
+		psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_PRIVATE;
+
+		psPhysHeap->sCardBase.uiAddr = psSysData->pdata->rogue_heap_nonmappable.base;
+		psPhysHeap->uiSize = psSysData->pdata->rogue_heap_nonmappable.size;
+		/* No CPU-visible address: this region is not reachable through the BAR. */
+		psPhysHeap->sStartAddr.uiAddr = 0;
+
+		PVR_LOG(("Added non-mappable local memory heap. Base = 0x%016llx, Size=0x%016llx",
+				psPhysHeap->sCardBase.uiAddr,
+				psPhysHeap->uiSize));
+
+		PVR_ASSERT(psPhysHeap->uiSize < SYS_DEV_MEM_REGION_SIZE);
+	}
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+	psPhysHeap = &pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL];
+	psPhysHeap->eType = PHYS_HEAP_TYPE_LMA;
+	psPhysHeap->pszPDumpMemspaceName = "LMA";
+	psPhysHeap->psMemFuncs = &gsLocalPhysHeapFuncs;
+	psPhysHeap->hPrivData = hPhysHeapPrivData;
+	psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_EXTERNAL;
+
+	psPhysHeap->sCardBase.uiAddr = PLATO_DDR_DEV_PHYSICAL_BASE;
+	psPhysHeap->sStartAddr.uiAddr = psSysData->pdata->pdp_heap.base;
+	psPhysHeap->uiSize = psSysData->pdata->pdp_heap.size;
+
+	PVR_LOG(("Added PDP heap. Base = 0x%016llx, Size=0x%016llx",
+			psPhysHeap->sStartAddr.uiAddr,
+			psPhysHeap->uiSize));
+#endif
+
+	return PVRSRV_OK;
+}
+#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Populate a single UMA heap config backed by host RAM (card base fixed at
+ * PLATO_HOSTRAM_DEV_PHYSICAL_BASE). In the HOST memory config this is the
+ * GPU_LOCAL heap; in HYBRID it becomes CPU_LOCAL (also tagged EXTERNAL when
+ * no display support is built, so the display can allocate from it). */
+static PVRSRV_ERROR InitHostHeaps(SYS_DATA *psSysData,
+								PHYS_HEAP_CONFIG *pasPhysHeaps,
+								IMG_UINT32 uiPhysHeapCount,
+								IMG_HANDLE hPhysHeapPrivData)
+{
+	PHYS_HEAP_CONFIG *psPhysHeap;
+
+	/* Callers hand over exactly one slot of the heap array. */
+	PVR_ASSERT(uiPhysHeapCount == 1);
+
+	psPhysHeap = &pasPhysHeaps[0];
+	psPhysHeap->eType = PHYS_HEAP_TYPE_UMA;
+	psPhysHeap->pszPDumpMemspaceName = "SYSMEM";
+	psPhysHeap->psMemFuncs = &gsHostPhysHeapFuncs;
+	psPhysHeap->hPrivData = hPhysHeapPrivData;
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+	psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+	psPhysHeap->ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL;
+	PVR_DPF((PVR_DBG_WARNING, "Initialising CPU_LOCAL UMA Host PhysHeaps"));
+#if !defined(SUPPORT_PLATO_DISPLAY)
+	psPhysHeap->ui32UsageFlags |= PHYS_HEAP_USAGE_EXTERNAL;
+#endif
+#endif
+	psPhysHeap->sCardBase.uiAddr = PLATO_HOSTRAM_DEV_PHYSICAL_BASE;
+
+	return PVRSRV_OK;
+}
+#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) */
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Hybrid memory config: one UMA CPU_LOCAL heap backed by host RAM plus the
+ * card-local heaps. Delegates to InitHostHeaps() for the CPU_LOCAL slot and
+ * InitLocalHeaps() for the rest, then renames the pdump memory spaces so
+ * the two LMA heaps are distinguishable in pdump output. */
+static PVRSRV_ERROR InitHybridHeaps(SYS_DATA *psSysData,
+								PHYS_HEAP_CONFIG *pasPhysHeaps,
+								IMG_UINT32 uiPhysHeapCount,
+								IMG_HANDLE hPhysHeapPrivData)
+{
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(uiPhysHeapCount >= PHYS_HEAP_ID_NON_MAPPABLE);
+
+	eError = InitHostHeaps(psSysData,
+						&pasPhysHeaps[PHYS_HEAP_ID_CPU_LOCAL], 1,
+						hPhysHeapPrivData);
+	if (eError != PVRSRV_OK)
+		return eError;
+
+	/*
+	 * InitLocalHeaps should set up the correct heaps regardless of whether the
+	 * memory configuration is 'local' or 'hybrid'.
+	 */
+	eError = InitLocalHeaps(psSysData, pasPhysHeaps,
+						uiPhysHeapCount, hPhysHeapPrivData);
+	if (eError != PVRSRV_OK) {
+		return eError;
+	}
+
+	/* Adjust the pdump memory space names */
+	pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].pszPDumpMemspaceName = "LMA0";
+#if defined(SUPPORT_PLATO_DISPLAY)
+	pasPhysHeaps[PHYS_HEAP_ID_PDP_LOCAL].pszPDumpMemspaceName = "LMA1";
+#endif
+	return PVRSRV_OK;
+}
+#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */
+
+/* Allocate and populate the physical heap config array for the compiled-in
+ * PLATO_MEMORY_CONFIG. Heap count: one (GPU_LOCAL) always, plus CPU_LOCAL
+ * for hybrid, plus the PDP heap when display support is built in, plus the
+ * non-mappable heap when the BAR does not cover all card memory.
+ *
+ * On success ownership of *ppasPhysHeapsOut transfers to the caller, who
+ * frees it with OSFreeMem (see DeviceConfigDestroy).
+ *
+ * NOTE(review): eError is only assigned inside the #if branches below;
+ * sysconfig.h is expected to reject any other PLATO_MEMORY_CONFIG at
+ * compile time — confirm the guard there is effective. */
+static PVRSRV_ERROR PhysHeapsCreate(SYS_DATA *psSysData,
+								PVRSRV_DEVICE_CONFIG *psDevConfig,
+								PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+								IMG_UINT32 *puiPhysHeapCountOut)
+{
+	IMG_UINT32 uiHeapCount = 1;
+	PHYS_HEAP_CONFIG *pasPhysHeaps;
+	PVRSRV_ERROR eError;
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+	uiHeapCount++;
+#endif
+
+#if defined(SUPPORT_PLATO_DISPLAY)
+	uiHeapCount++;
+#endif
+
+	if (PLATO_HAS_NON_MAPPABLE(psSysData))
+	{
+		uiHeapCount++;
+	}
+
+	pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount);
+	if (!pasPhysHeaps)
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+	/* The device config doubles as the heaps' private data so the address
+	 * translators can reach pasPhysHeaps. */
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+	eError = InitLocalHeaps(psSysData, pasPhysHeaps,
+						uiHeapCount, psDevConfig);
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+	eError = InitHostHeaps(psSysData, pasPhysHeaps,
+						uiHeapCount, psDevConfig);
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+	eError = InitHybridHeaps(psSysData, pasPhysHeaps,
+						uiHeapCount, psDevConfig);
+#endif
+
+	if (eError != PVRSRV_OK) {
+		OSFreeMem(pasPhysHeaps);
+		return eError;
+	}
+
+	*ppasPhysHeapsOut = pasPhysHeaps;
+	*puiPhysHeapCountOut = uiHeapCount;
+
+	return PVRSRV_OK;
+}
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Translate CPU physical addresses into device addresses for the GPU_LOCAL
+ * LMA heap: rebase each address from the heap's CPU-visible start onto its
+ * card base. hPrivData is the device config stored in the heap's hPrivData. */
+static void PlatoLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+					IMG_UINT32 ui32NumOfAddr,
+					IMG_DEV_PHYADDR *psDevPAddr,
+					IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	PHYS_HEAP_CONFIG *psHeap = &psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL];
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr -
+			psHeap->sStartAddr.uiAddr +
+			psHeap->sCardBase.uiAddr;
+	}
+}
+
+/* Inverse of PlatoLocalCpuPAddrToDevPAddr: rebase each device address from
+ * the GPU_LOCAL heap's card base back onto its CPU-visible start address. */
+static void PlatoLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+					IMG_UINT32 ui32NumOfAddr,
+					IMG_CPU_PHYADDR *psCpuPAddr,
+					IMG_DEV_PHYADDR *psDevPAddr)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+	PHYS_HEAP_CONFIG *psHeap = &psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL];
+	IMG_UINT32 ui32Idx;
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr -
+			psHeap->sCardBase.uiAddr +
+			psHeap->sStartAddr.uiAddr;
+	}
+}
+
+#endif
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Translate CPU physical addresses into device addresses for host-RAM (UMA)
+ * heaps: host memory appears in the device map at the fixed offset
+ * PLATO_HOSTRAM_DEV_PHYSICAL_BASE. */
+static void PlatoSystemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+					IMG_UINT32 ui32NumOfAddr,
+					IMG_DEV_PHYADDR *psDevPAddr,
+					IMG_CPU_PHYADDR *psCpuPAddr)
+{
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psDevPAddr[ui32Idx].uiAddr =
+			psCpuPAddr[ui32Idx].uiAddr + PLATO_HOSTRAM_DEV_PHYSICAL_BASE;
+	}
+}
+
+/* Inverse of PlatoSystemCpuPAddrToDevPAddr: strip the fixed host-RAM offset
+ * from each device address to recover the CPU physical address. */
+static void PlatoSystemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+					IMG_UINT32 ui32NumOfAddr,
+					IMG_CPU_PHYADDR *psCpuPAddr,
+					IMG_DEV_PHYADDR *psDevPAddr)
+{
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(
+			psDevPAddr[ui32Idx].uiAddr - PLATO_HOSTRAM_DEV_PHYSICAL_BASE);
+	}
+}
+
+#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */
+
+/* Build the PVRSRV_DEVICE_CONFIG for the Plato device.
+ *
+ * The config, the RGX data and the RGX timing info are carved out of one
+ * allocation, so DeviceConfigDestroy() releases all three with a single
+ * OSFreeMem. On success ownership of *ppsDevConfigOut (and its heap array)
+ * passes to the caller. */
+static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice,
+								SYS_DATA *psSysData,
+								PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	PVRSRV_ERROR eError;
+
+	/* Single allocation: [config][RGX data][timing info] */
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+						sizeof(*psRGXData) +
+						sizeof(*psRGXTimingInfo));
+	if (!psDevConfig)
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+	/* Set up the RGX timing information */
+	psRGXTimingInfo->ui32CoreClockSpeed = plato_core_clock_speed(&psSysData->pdev->dev);
+	psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+	/* Set up the RGX data */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+	/* Initialize heaps */
+	eError = PhysHeapsCreate(psSysData, psDevConfig, &psDevConfig->pasPhysHeaps,
+						&psDevConfig->ui32PhysHeapCount);
+	if (eError != PVRSRV_OK) {
+		OSFreeMem(psDevConfig);
+		return eError;
+	}
+
+	psDevConfig->pvOSDevice = pvOSDevice;
+	psDevConfig->pszName = PLATO_SYSTEM_NAME;
+	psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+	psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+	psDevConfig->ui32RegsSize = SYS_PLATO_REG_RGX_SIZE;
+	psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+
+	psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+	psDevConfig->bHasNonMappableLocalMemory = PLATO_HAS_NON_MAPPABLE(psSysData);
+	psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
+
+	psDevConfig->ui32IRQ = PLATO_INTERRUPT_GPU;
+
+	psDevConfig->hDevData = psRGXData;
+	psDevConfig->hSysData = psSysData;
+
+	*ppsDevConfigOut = psDevConfig;
+
+	return PVRSRV_OK;
+}
+
+/* Release a device config created by DeviceConfigCreate: the optional
+ * version string, the heap config array, and finally the combined
+ * config/RGX-data/timing-info allocation. */
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	if (psDevConfig->pszVersion)
+		OSFreeMem(psDevConfig->pszVersion);
+
+	if (psDevConfig->pasPhysHeaps)
+		OSFreeMem(psDevConfig->pasPhysHeaps);
+
+	/*
+	 * The device config, RGX data and RGX timing info are part of the same
+	 * allocation so do only one free.
+	 */
+	OSFreeMem(psDevConfig);
+}
+
+/* Quick read/write sanity test of the GPU_LOCAL heap as seen from the CPU.
+ * Maps a small chunk at 500 MiB intervals across the heap, writes a pattern
+ * and reads it back. Returns PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL if
+ * a chunk cannot be mapped or a read-back mismatches. */
+static PVRSRV_ERROR PlatoLocalMemoryTest(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	IMG_UINT64 i, j = 0;
+	IMG_UINT32 tmp = 0;
+	IMG_UINT32 chunk = sizeof(IMG_UINT32) * 10;
+
+	IMG_UINT64 ui64TestMemoryBase = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].sStartAddr.uiAddr;
+	IMG_UINT64 ui64TestMemorySize = psDevConfig->pasPhysHeaps[PHYS_HEAP_ID_GPU_LOCAL].uiSize;
+
+	PVR_LOG(("%s: Starting Local memory test from 0x%llx to 0x%llx (in CPU space)",
+			__func__, ui64TestMemoryBase, ui64TestMemoryBase + ui64TestMemorySize));
+
+	while (j < ui64TestMemorySize) {
+		IMG_CPU_PHYADDR myPaddr;
+		IMG_UINT32 *pui32Virt;
+
+		myPaddr.uiAddr = ui64TestMemoryBase + j;
+		pui32Virt = OSMapPhysToLin(myPaddr, chunk, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+		/* Fix: the mapping was previously dereferenced unchecked; a failed
+		 * OSMapPhysToLin would have caused a NULL dereference in the kernel. */
+		if (pui32Virt == NULL) {
+			PVR_DPF((PVR_DBG_ERROR,
+					"%s: Failed to map local memory at address=0x%llx",
+					__func__, ui64TestMemoryBase + j));
+			return PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL;
+		}
+
+		for (i = 0; i < chunk/sizeof(IMG_UINT32); i++) {
+			*(pui32Virt + i) = 0xdeadbeef;
+			OSWriteMemoryBarrier(pui32Virt);
+			tmp = *(pui32Virt + i);
+			if (tmp != 0xdeadbeef) {
+				PVR_DPF((PVR_DBG_ERROR,
+						"Local memory read-write test failed at address=0x%llx: written 0x%x, read 0x%x",
+						ui64TestMemoryBase + ((i * sizeof(IMG_UINT32)) + j), (IMG_UINT32) 0xdeadbeef, tmp));
+
+				OSUnMapPhysToLin(pui32Virt, chunk);
+				return PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL;
+			}
+		}
+
+		OSUnMapPhysToLin(pui32Virt, chunk);
+
+		/* Sample every 500 MiB rather than exhaustively testing the heap. */
+		j += (1024 * 1024 * 500);
+	}
+
+	PVR_LOG(("Local memory read-write test passed!"));
+	return PVRSRV_OK;
+}
+
+/* Initialise the Plato device: enable the PCI-hosted platform device, locate
+ * and (on real hardware) reserve the Rogue register resource, build the
+ * device config and run a quick local-memory sanity test.
+ *
+ * pvOSDevice    The OS device handle (a struct device *).
+ * ppsDevConfig  Receives the newly created device configuration; undo is
+ *               performed by SysDevDeInit(). */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	SYS_DATA *psSysData;
+	IMG_UINT32 uiRegistersSize;
+	PVRSRV_ERROR eError;
+
+	PVR_ASSERT(pvOSDevice);
+
+	psSysData = OSAllocZMem(sizeof(*psSysData));
+	if (psSysData == NULL)
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
+
+	/* Retrieve platform device and data */
+	psSysData->pdev = to_platform_device((struct device *) pvOSDevice);
+	psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+	/* Enable plato PCI */
+	if (plato_enable(psSysData->pdev->dev.parent)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device", __func__));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto ErrFreeSysData;
+	}
+
+	psSysData->registers = platform_get_resource_byname(psSysData->pdev, IORESOURCE_MEM, PLATO_ROGUE_RESOURCE_REGS);
+	if (!psSysData->registers) {
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Failed to get Rogue register information",
+				__func__));
+		eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+		goto ErrorDevDisable;
+	}
+
+	/* Check the address range is large enough. */
+	uiRegistersSize = resource_size(psSysData->registers);
+	if (uiRegistersSize < SYS_PLATO_REG_RGX_SIZE) {
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Rogue register region isn't big enough (was %08X, required 0x%08x)",
+				__func__, uiRegistersSize, SYS_PLATO_REG_RGX_SIZE));
+
+		eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+		goto ErrorDevDisable;
+	}
+
+#if !defined(VIRTUAL_PLATFORM)
+	/* Reserve the rogue registers address range */
+	if (!request_mem_region(psSysData->registers->start,
+							uiRegistersSize,
+							PVRSRV_MODNAME)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Rogue register memory region not available", __func__));
+		eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+		goto ErrorDevDisable;
+	}
+#endif
+
+	eError = DeviceConfigCreate(pvOSDevice, psSysData, &psDevConfig);
+	if (eError != PVRSRV_OK) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create device config", __func__));
+		goto ErrorReleaseMemRegion;
+	}
+
+	PlatoLocalMemoryTest(psDevConfig);
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+
+ErrorReleaseMemRegion:
+#if !defined(VIRTUAL_PLATFORM)
+	/* Fix: only release the region when it was actually requested above; on
+	 * VIRTUAL_PLATFORM builds no reservation is made, and releasing an
+	 * unrequested region corrupts the resource tree. */
+	release_mem_region(psSysData->registers->start,
+					resource_size(psSysData->registers));
+#endif
+ErrorDevDisable:
+	plato_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+	OSFreeMem(psSysData);
+	return eError;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+ DeviceConfigDestroy(psDevConfig);
+
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ plato_disable(psSysData->pdev->dev.parent);
+
+ OSFreeMem(psSysData);
+}
+
+/* Trampoline registered with the plato base driver: forwards the interrupt
+ * to the services LISR with the context captured at install time. */
+static void PlatoInterruptHandler(void *pvData)
+{
+	LISR_DATA *psLISRData = pvData;
+
+	psLISRData->pfnLISR(psLISRData->pvData);
+}
+
+/* Install a device low-level ISR via the plato base driver.
+ *
+ * Only the GPU interrupt (PLATO_INTERRUPT_GPU) is accepted here. The handler
+ * is registered against the platform device's parent (the PCI device) and
+ * then unmasked. On failure all partial state is undone.
+ *
+ * hSysData   SYS_DATA created by SysDevInit.
+ * ui32IRQ    Must be PLATO_INTERRUPT_GPU.
+ * pszName    Name used for logging only.
+ * pfnLISR    Services callback invoked on each interrupt.
+ * pvData     Context passed to pfnLISR.
+ * phLISRData Receives the handle to pass to SysUninstallDeviceLISR. */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+								IMG_UINT32 ui32IRQ,
+								const IMG_CHAR *pszName,
+								PFN_LISR pfnLISR,
+								void *pvData,
+								IMG_HANDLE *phLISRData)
+{
+	SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+	LISR_DATA *psLISRData;
+	PVRSRV_ERROR eError;
+
+	/* Should only accept GPU interrupts through this API */
+	if (ui32IRQ != PLATO_INTERRUPT_GPU) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid %d", __func__, ui32IRQ));
+		return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+	}
+
+	psLISRData = OSAllocZMem(sizeof(*psLISRData));
+	if (!psLISRData)
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+
+	psLISRData->pfnLISR = pfnLISR;
+	psLISRData->pvData = pvData;
+	psLISRData->iInterruptID = ui32IRQ;
+	psLISRData->psDev = psSysData->pdev->dev.parent;
+
+	if (plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, PlatoInterruptHandler, psLISRData)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: plato_set_interrupt_handler() failed", __func__));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_free_data;
+	}
+
+	if (plato_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID)) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: plato_enable_interrupt() failed", __func__));
+		eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+		goto err_unset_interrupt_handler;
+	}
+
+	*phLISRData = psLISRData;
+
+	PVR_LOG(("Installed device LISR %s on IRQ %d", pszName, ui32IRQ));
+
+	return PVRSRV_OK;
+
+err_unset_interrupt_handler:
+	/* Roll back the handler registration before freeing its state. */
+	plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+	OSFreeMem(psLISRData);
+	return eError;
+}
+
+/* Uninstall a LISR previously installed by SysInstallDeviceLISR: mask the
+ * interrupt first so no stray IRQ lands in a torn-down handler, then remove
+ * the handler registration and free the LISR state. Failures from the base
+ * driver are logged but do not abort the teardown. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+	LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+	int err;
+
+	err = plato_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+	if (err)
+	{
+		/* Fix: this path previously logged "plato_enable_interrupt()",
+		 * misattributing the failure. */
+		PVR_DPF((PVR_DBG_ERROR, "%s: plato_disable_interrupt() failed (%d)", __func__, err));
+	}
+
+	err = plato_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+	if (err)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: plato_set_interrupt_handler() failed (%d)", __func__, err));
+	}
+
+	PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+	OSFreeMem(psLISRData);
+
+	return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSCONFIG_H)
+#define SYSCONFIG_H
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "plato_drv.h"
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+#define MAX_SYSTEMS 32
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+static void PlatoLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void PlatoLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+#endif
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+static void PlatoSystemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void PlatoSystemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+#endif /* (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) */
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Address translators for heaps backed by card-local (LMA) memory. */
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs = {
+	/* pfnCpuPAddrToDevPAddr */
+	PlatoLocalCpuPAddrToDevPAddr,
+	/* pfnDevPAddrToCpuPAddr */
+	PlatoLocalDevPAddrToCpuPAddr,
+};
+#endif
+
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST) || (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID)
+/* Address translators for heaps backed by host (UMA) memory. */
+static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs = {
+	/* pfnCpuPAddrToDevPAddr */
+	PlatoSystemCpuPAddrToDevPAddr,
+	/* pfnDevPAddrToCpuPAddr */
+	PlatoSystemDevPAddrToCpuPAddr,
+};
+#endif
+
+/* Reject any PLATO_MEMORY_CONFIG that is not one of the three supported
+ * configurations. Fix: the last clause must be '!=' — with '==' a valid
+ * HOST build tripped the #error while genuinely unknown values passed
+ * silently (and left eError uninitialised in PhysHeapsCreate). */
+#if (PLATO_MEMORY_CONFIG != PLATO_MEMORY_LOCAL) && \
+    (PLATO_MEMORY_CONFIG != PLATO_MEMORY_HYBRID) && \
+    (PLATO_MEMORY_CONFIG != PLATO_MEMORY_HOST)
+#error "PLATO_MEMORY_CONFIG not valid"
+#endif
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* !defined(SYSCONFIG_H) */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSINFO_H)
+#define SYSINFO_H
+
+#define SYS_RGX_DEV_VENDOR_ID (0x1AEE)
+#define SYS_RGX_DEV_DEVICE_ID (0x0003)
+#define SYS_RGX_DEV_NAME "plato_rogue"
+
+/*!< System specific poll/timeout details */
+#if defined(VIRTUAL_PLATFORM) || defined(EMULATOR)
+/* Emulator clock ~600 times slower than HW */
+#define MAX_HW_TIME_US (300000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1000000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(100000)
+#endif
+
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#endif /* !defined(SYSINFO_H) */
do_invalid_range(0x0, 0x200000);
/* Remap pages into VMALLOC space */
- pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
+ pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_MAP, prot);
psDmaAlloc->PageProps = prot;
/* Clean-up tmp buffers */
#include "vmm_pvz_server.h"
static PVRSRV_ERROR
-StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
- IMG_UINT32 ui32DevID,
- IMG_UINT64 ui64Size,
+StubVMMMapDevPhysHeap(IMG_UINT64 ui64Size,
IMG_UINT64 ui64Addr)
{
- PVR_UNREFERENCED_PARAMETER(ui32FuncID);
- PVR_UNREFERENCED_PARAMETER(ui32DevID);
PVR_UNREFERENCED_PARAMETER(ui64Size);
PVR_UNREFERENCED_PARAMETER(ui64Addr);
+ /* Stub hypervisor hook: mapping a device physical heap is not
+ * supported by this VMM type, so report it explicitly. */
return PVRSRV_ERROR_NOT_IMPLEMENTED;
}
static PVRSRV_ERROR
-StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
- IMG_UINT32 ui32DevID)
+StubVMMUnmapDevPhysHeap(void)
{
+ /* Stub hypervisor hook: unmapping a device physical heap is not
+ * supported by this VMM type, so report it explicitly. */
return PVRSRV_ERROR_NOT_IMPLEMENTED;
}
{
.sClientFuncTab = {
/* pfnMapDevPhysHeap */
- &StubVMMMapDevPhysHeap,
+ .pfnMapDevPhysHeap = &StubVMMMapDevPhysHeap,
/* pfnUnmapDevPhysHeap */
- &StubVMMUnmapDevPhysHeap
+ .pfnUnmapDevPhysHeap = &StubVMMUnmapDevPhysHeap
},
.sServerFuncTab = {
/* pfnMapDevPhysHeap */
- &PvzServerMapDevPhysHeap,
+ .pfnMapDevPhysHeap = &PvzServerMapDevPhysHeap,
/* pfnUnmapDevPhysHeap */
- &PvzServerUnmapDevPhysHeap
+ .pfnUnmapDevPhysHeap = &PvzServerUnmapDevPhysHeap
},
.sVmmFuncTab = {
/* pfnOnVmOnline */
- &PvzServerOnVmOnline,
+ .pfnOnVmOnline = &PvzServerOnVmOnline,
/* pfnOnVmOffline */
- &PvzServerOnVmOffline,
+ .pfnOnVmOffline = &PvzServerOnVmOffline,
/* pfnVMMConfigure */
- &PvzServerVMMConfigure
+ .pfnVMMConfigure = &PvzServerVMMConfigure
}
};
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+
+# Kernel module name is inherited from the top-level driver build.
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+# Locate the system layer sources: in-tree services build by default,
+# or an external kernel-driver checkout when KERNEL_DRIVER_DIR is set.
+ifeq ($(KERNEL_DRIVER_DIR),)
+ SYSTEM_BASEDIR := services/system/rogue/$(PVR_SYSTEM)
+else
+ SYSTEM_BASEDIR := external/$(KERNEL_DRIVER_DIR)/$(PVR_SYSTEM)
+endif
+
+# MT8173 system layer objects plus the common virtualization (pvz) glue.
+$(PVRSRVKM_NAME)-y += \
+ $(SYSTEM_BASEDIR)/mt8173_mfgsys.o \
+ $(SYSTEM_BASEDIR)/mt8173_sysconfig.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/rogue/common/vmm_type_$(VMM_TYPE).o
+
+# Generic ION buffer-sharing support, only when enabled by the build config.
+ifeq ($(SUPPORT_ION),1)
+ $(PVRSRVKM_NAME)-y += \
+ services/system/common/env/linux/ion_support_generic.o
+endif
--- /dev/null
+/*
+* Copyright (c) 2014 MediaTek Inc.
+* Author: Chiawen Lee <chiawen.lee@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/thermal.h>
+
+#include "mt8173_mfgsys.h"
+
+static const char * const top_mfg_clk_name[] = {
+ "mfg_mem_in_sel",
+ "mfg_axi_in_sel",
+ "top_axi",
+ "top_mem",
+};
+
+#define MAX_TOP_MFG_CLK ARRAY_SIZE(top_mfg_clk_name)
+
+#define REG_MFG_AXI BIT(0)
+#define REG_MFG_MEM BIT(1)
+#define REG_MFG_G3D BIT(2)
+#define REG_MFG_26M BIT(3)
+#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
+
+/* Ungate every MFG sub-clock (AXI/MEM/G3D/26M) via the CG CLR register. */
+static void mtk_mfg_clr_clock_gating(void __iomem *reg)
+{
+ writel(REG_MFG_ALL, reg + REG_MFG_CG_CLR);
+}
+
+/* Prepare (but do not enable) all top-level MFG clocks, then top_mfg.
+ * Returns 0 on success or the clk_prepare() error. On failure the
+ * already-prepared top_clk[] entries are unprepared; top_mfg never needs
+ * unwinding because it is prepared last.
+ */
+static int mtk_mfg_prepare_clock(struct mtk_mfg *mfg)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ ret = clk_prepare(mfg->top_clk[i]);
+ if (ret)
+ goto unwind;
+ }
+ ret = clk_prepare(mfg->top_mfg);
+ if (ret)
+ goto unwind;
+
+ return 0;
+unwind:
+ /* i indexes the first clock that failed (or MAX_TOP_MFG_CLK if
+ * top_mfg failed), so this releases exactly the prepared ones. */
+ while (i--)
+ clk_unprepare(mfg->top_clk[i]);
+
+ return ret;
+}
+
+/* Reverse of mtk_mfg_prepare_clock(): unprepare top_mfg first, then the
+ * top_clk[] array in reverse order of preparation. */
+static void mtk_mfg_unprepare_clock(struct mtk_mfg *mfg)
+{
+ int i;
+
+ clk_unprepare(mfg->top_mfg);
+ for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+ clk_unprepare(mfg->top_clk[i]);
+}
+
+/* Enable all (already prepared) top-level MFG clocks and top_mfg, then
+ * clear the MFG clock gating so the GPU blocks are actually clocked.
+ * On failure the already-enabled top_clk[] entries are disabled again.
+ */
+static int mtk_mfg_enable_clock(struct mtk_mfg *mfg)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ ret = clk_enable(mfg->top_clk[i]);
+ if (ret)
+ goto unwind;
+ }
+ ret = clk_enable(mfg->top_mfg);
+ if (ret)
+ goto unwind;
+ /* Ungating only makes sense once the clocks feeding MFG are up. */
+ mtk_mfg_clr_clock_gating(mfg->reg_base);
+
+ return 0;
+unwind:
+ while (i--)
+ clk_disable(mfg->top_clk[i]);
+
+ return ret;
+}
+
+/* Reverse of mtk_mfg_enable_clock(): disable top_mfg first, then the
+ * top_clk[] array in reverse order. */
+static void mtk_mfg_disable_clock(struct mtk_mfg *mfg)
+{
+ int i;
+
+ clk_disable(mfg->top_mfg);
+ for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+ clk_disable(mfg->top_clk[i]);
+}
+
+/* Program the MFG hardware active-power-management registers.
+ * NOTE(review): the values and offsets are undocumented magic numbers,
+ * presumably MT8173 reference settings — confirm against the datasheet;
+ * 0xec appears to be the APM enable/control word (cleared on disable).
+ */
+static void mtk_mfg_enable_hw_apm(struct mtk_mfg *mfg)
+{
+ writel(0x003c3d4d, mfg->reg_base + 0x24);
+ writel(0x4d45440b, mfg->reg_base + 0x28);
+ writel(0x7a710184, mfg->reg_base + 0xe0);
+ writel(0x835f6856, mfg->reg_base + 0xe4);
+ writel(0x002b0234, mfg->reg_base + 0xe8);
+ writel(0x80000000, mfg->reg_base + 0xec);
+ writel(0x08000000, mfg->reg_base + 0xa0);
+}
+
+/* Power up the MFG block: take a runtime-PM reference, enable the clocks
+ * and turn on hardware APM. Returns 0 on success or a negative errno.
+ */
+int mtk_mfg_enable(struct mtk_mfg *mfg)
+{
+ int ret;
+
+ /* pm_runtime_get_sync() returns 1 when the device was already
+ * active, which is success — only negative values are errors.
+ * On error the usage count is still incremented, so drop it
+ * again before bailing out.
+ */
+ ret = pm_runtime_get_sync(mfg->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(mfg->dev);
+ return ret;
+ }
+
+ ret = mtk_mfg_enable_clock(mfg);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ mtk_mfg_enable_hw_apm(mfg);
+
+ return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put_sync(mfg->dev);
+ return ret;
+}
+
+/* Turn off hardware APM by clearing the control word at offset 0xec
+ * (set to 0x80000000 in mtk_mfg_enable_hw_apm()). */
+static void mtk_mfg_disable_hw_apm(struct mtk_mfg *mfg)
+{
+ writel(0x00, mfg->reg_base + 0xec);
+}
+
+/* Power down the MFG block: mirror of mtk_mfg_enable() in reverse order —
+ * disable HW APM, gate the clocks, then release the runtime-PM reference.
+ */
+void mtk_mfg_disable(struct mtk_mfg *mfg)
+{
+ mtk_mfg_disable_hw_apm(mfg);
+
+ mtk_mfg_disable_clock(mfg);
+ pm_runtime_put_sync(mfg->dev);
+}
+
+/* Set the GPU core clock to @freq (Hz). To retune MMPLL glitch-free,
+ * top_mfg is temporarily reparented to the fixed 26 MHz oscillator while
+ * MMPLL changes rate, then reparented back to MMPLL.
+ * Returns 0 on success or the first clk framework error encountered.
+ */
+int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mfg->top_mfg);
+ if (ret) {
+ dev_err(mfg->dev, "enable and prepare top_mfg failed, %d\n", ret);
+ return ret;
+ }
+
+ /* Park the mux on the stable 26 MHz source during the PLL change. */
+ ret = clk_set_parent(mfg->top_mfg, mfg->clk26m);
+ if (ret) {
+ dev_err(mfg->dev, "Set clk parent to clk26m failed, %d\n", ret);
+ goto unprepare_top_mfg;
+ }
+
+ ret = clk_set_rate(mfg->mmpll, freq);
+ if (ret)
+ dev_err(mfg->dev, "Set freq to %lu Hz failed, %d\n", freq, ret);
+
+ /* Switch back to MMPLL even if the rate change failed, so the GPU
+ * keeps running from its normal parent. */
+ ret = clk_set_parent(mfg->top_mfg, mfg->top_mmpll);
+ if (ret)
+ dev_err(mfg->dev, "Set clk parent to top_mmpll failed, %d\n", ret);
+
+unprepare_top_mfg:
+ clk_disable_unprepare(mfg->top_mfg);
+
+ if (!ret)
+ dev_dbg(mfg->dev, "Freq set to %lu Hz\n", freq);
+
+ return ret;
+}
+
+/* Set the GPU regulator to exactly @volt microvolts (min == max).
+ * Returns 0 on success or the regulator framework error.
+ */
+int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt)
+{
+ int ret;
+
+ ret = regulator_set_voltage(mfg->vgpu, volt, volt);
+ if (ret != 0) {
+ /* %d (not %u): volt is a signed int. */
+ dev_err(mfg->dev, "Set voltage to %d uV failed, %d\n",
+ volt, ret);
+ return ret;
+ }
+
+ dev_dbg(mfg->dev, "Voltage set to %d uV\n", volt);
+
+ return 0;
+}
+
+/* Gather all MT8173 MFG device resources: GPU register window (resource 0,
+ * kept as start/size for the DDK), the "RGX" IRQ, the MFG config register
+ * window (resource 1, mapped here), all clocks, the optional thermal zone
+ * and the GPU regulator. Enables runtime PM last, once everything needed
+ * for power transitions is in place. All allocations are devm-managed.
+ * Returns 0 on success or a negative errno.
+ */
+static int mtk_mfg_bind_device_resource(struct mtk_mfg *mfg)
+{
+ struct device *dev = mfg->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i;
+ struct resource *res;
+
+ mfg->top_clk = devm_kcalloc(dev, MAX_TOP_MFG_CLK,
+ sizeof(*mfg->top_clk), GFP_KERNEL);
+ if (!mfg->top_clk)
+ return -ENOMEM;
+
+ /* Resource 0: GPU registers. Only start/size are recorded; the DDK
+ * maps them itself later. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ mfg->rgx_start = res->start;
+ mfg->rgx_size = resource_size(res);
+
+ mfg->rgx_irq = platform_get_irq_byname(pdev, "RGX");
+ if (mfg->rgx_irq < 0)
+ return mfg->rgx_irq;
+
+ /* Resource 1: MFG top config registers (clock gating, APM).
+ * devm_ioremap_resource() tolerates a NULL res and returns ERR_PTR. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mfg->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mfg->reg_base))
+ return PTR_ERR(mfg->reg_base);
+
+ mfg->mmpll = devm_clk_get(dev, "mmpll_clk");
+ if (IS_ERR(mfg->mmpll)) {
+ dev_err(dev, "devm_clk_get mmpll_clk failed !!!\n");
+ return PTR_ERR(mfg->mmpll);
+ }
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ mfg->top_clk[i] = devm_clk_get(dev, top_mfg_clk_name[i]);
+ if (IS_ERR(mfg->top_clk[i])) {
+ dev_err(dev, "devm_clk_get %s failed !!!\n",
+ top_mfg_clk_name[i]);
+ return PTR_ERR(mfg->top_clk[i]);
+ }
+ }
+
+ mfg->top_mfg = devm_clk_get(dev, "top_mfg");
+ if (IS_ERR(mfg->top_mfg)) {
+ dev_err(dev, "devm_clk_get top_mfg failed !!!\n");
+ return PTR_ERR(mfg->top_mfg);
+ }
+
+ mfg->top_mmpll = devm_clk_get(dev, "top_mmpll");
+ if (IS_ERR(mfg->top_mmpll)) {
+ dev_err(dev, "devm_clk_get top_mmpll failed !!!\n");
+ return PTR_ERR(mfg->top_mmpll);
+ }
+
+ mfg->clk26m = devm_clk_get(dev, "clk26m");
+ if (IS_ERR(mfg->clk26m)) {
+ dev_err(dev, "devm_clk_get clk26m failed !!!\n");
+ return PTR_ERR(mfg->clk26m);
+ }
+
+/* NOTE(review): this assignment is guarded by CONFIG_DEVFREQ_THERMAL, but
+ * the tz member in mt8173_mfgsys.h is guarded by kernel-version checks —
+ * verify both guards agree or this fails to build on some configs. */
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ mfg->tz = thermal_zone_get_zone_by_name("cpu_thermal");
+ if (IS_ERR(mfg->tz)) {
+ /* Non-fatal: static power estimation just returns 0 without it. */
+ dev_warn(dev, "Failed to get cpu_thermal zone\n");
+ }
+#endif
+
+ mfg->vgpu = devm_regulator_get(dev, "mfgsys-power");
+ if (IS_ERR(mfg->vgpu))
+ return PTR_ERR(mfg->vgpu);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+/* Undo mtk_mfg_bind_device_resource(). Only runtime PM needs explicit
+ * teardown — every other resource was acquired with devm_* helpers and is
+ * released automatically when the device goes away. */
+static void mtk_mfg_unbind_device_resource(struct mtk_mfg *mfg)
+{
+ struct device *dev = mfg->dev;
+
+ pm_runtime_disable(dev);
+}
+
+/* Allocate and initialise the MT8173 MFG state for @dev: bind all device
+ * resources and prepare the clocks so later enable/disable calls only
+ * need the (atomic-safe) clk_enable/clk_disable step.
+ * Returns the new context or an ERR_PTR (including -EPROBE_DEFER
+ * propagated from resource binding).
+ */
+struct mtk_mfg *mtk_mfg_create(struct device *dev)
+{
+ int err;
+ struct mtk_mfg *mfg;
+
+ mtk_mfg_debug("mtk_mfg_create Begin\n");
+
+ mfg = devm_kzalloc(dev, sizeof(*mfg), GFP_KERNEL);
+ if (!mfg)
+ return ERR_PTR(-ENOMEM);
+ mfg->dev = dev;
+
+ err = mtk_mfg_bind_device_resource(mfg);
+ if (err != 0)
+ return ERR_PTR(err);
+
+ /* Serialises power-state transitions from the DDK callbacks. */
+ mutex_init(&mfg->set_power_state);
+
+ err = mtk_mfg_prepare_clock(mfg);
+ if (err)
+ goto err_unbind_resource;
+
+ mtk_mfg_debug("mtk_mfg_create End\n");
+
+ return mfg;
+err_unbind_resource:
+ mtk_mfg_unbind_device_resource(mfg);
+
+ return ERR_PTR(err);
+}
+
+/* Tear down a context created by mtk_mfg_create(), in reverse order:
+ * unprepare the clocks, then disable runtime PM. The struct itself is
+ * devm-allocated and freed with the device. */
+void mtk_mfg_destroy(struct mtk_mfg *mfg)
+{
+ mtk_mfg_unprepare_clock(mfg);
+
+ mtk_mfg_unbind_device_resource(mfg);
+}
--- /dev/null
+/*
+* Copyright (c) 2014 MediaTek Inc.
+* Author: Chiawen Lee <chiawen.lee@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef MT8173_MFGSYS_H
+#define MT8173_MFGSYS_H
+
+#include <linux/device.h>
+
+/* unit ms, timeout interval for DVFS detection */
+#define MTK_DVFS_SWITCH_INTERVAL 300
+
+/* Set to 1 to compile mtk_mfg_debug() into pr_info calls. */
+#define ENABLE_MTK_MFG_DEBUG 0
+
+#if ENABLE_MTK_MFG_DEBUG
+#define mtk_mfg_debug(fmt, args...) pr_info("[MFG]" fmt, ##args)
+#else
+#define mtk_mfg_debug(fmt, args...) do { } while (0)
+#endif
+
+/* Per-device state for the MT8173 MFG (GPU) subsystem. */
+struct mtk_mfg {
+ struct device *dev;
+
+ /* Array of MAX_TOP_MFG_CLK top-level clocks (see mt8173_mfgsys.c). */
+ struct clk **top_clk;
+ /* MFG top config registers (clock gating / HW APM), resource 1. */
+ void __iomem *reg_base;
+
+ /* GPU register window (resource 0); mapped later by the DDK. */
+ resource_size_t rgx_start;
+ resource_size_t rgx_size;
+ int rgx_irq;
+
+ /* mutex protect for set power state */
+ struct mutex set_power_state;
+
+ /* for gpu device freq/volt update */
+ struct regulator *vgpu;
+ struct clk *mmpll;
+ struct clk *top_mfg;
+ struct clk *top_mmpll;
+ struct clk *clk26m;
+
+/* NOTE(review): users of tz are guarded by CONFIG_DEVFREQ_THERMAL, while
+ * this member is guarded by kernel-version checks — the guards can
+ * disagree and break the build; verify they match. Also, this header
+ * uses LINUX_VERSION_CODE without including <linux/version.h> itself —
+ * presumably pulled in by every includer; confirm. */
+#if (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ struct thermal_zone_device *tz;
+#endif
+};
+
+struct mtk_mfg *mtk_mfg_create(struct device *dev);
+void mtk_mfg_destroy(struct mtk_mfg *mfg);
+
+int mtk_mfg_enable(struct mtk_mfg *mfg);
+void mtk_mfg_disable(struct mtk_mfg *mfg);
+
+int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq);
+int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt);
+
+#endif /* MT8173_MFGSYS_H*/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/thermal.h>
+#if defined(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#endif
+
+#include "physheap.h"
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+
+#include "mt8173_mfgsys.h"
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS 10
+#define RGX_HW_CORE_CLOCK_SPEED 395000000
+#define MT8173_SYSTEM_NAME "mt8173"
+
+/* File-global system data handle (the struct mtk_mfg *), stashed in
+ * DeviceConfigCreate() for the devfreq power-model callbacks that have
+ * no private-data argument. */
+static IMG_HANDLE ghSysData;
+
+/* Glue between the Linux IRQ handler and the DDK's LISR callback. */
+typedef struct
+{
+ IMG_UINT32 ui32IRQ;
+ PFN_LISR pfnLISR;
+ void *pvLISRData;
+} LISR_WRAPPER_DATA;
+
+/* Linux IRQ handler that forwards to the DDK LISR. Returns IRQ_HANDLED
+ * only when the LISR claims the interrupt, so shared/spurious IRQs are
+ * reported as IRQ_NONE. */
+static irqreturn_t MTKLISRWrapper(int iIrq, void *pvData)
+{
+ LISR_WRAPPER_DATA *psWrapperData = pvData;
+
+ if (psWrapperData->pfnLISR(psWrapperData->pvLISRData))
+ {
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * CPU to Device physical address translation
+ */
static
+/* CPU -> device physical address translation for the UMA heap. On this
+ * platform the mapping is the identity, so addresses are copied 1:1. */
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1) {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+}
+
+/*
+ * Device to CPU physical address translation
+ */
static
+/* Device -> CPU physical address translation for the UMA heap; identity
+ * mapping, mirror of UMAPhysHeapCpuPAddrToDevPAddr(). */
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1) {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+ }
+}
+
+/* Address-translation callbacks registered with the UMA physical heap. */
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = {
+ .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr,
+};
+
+/* DDK pre-power-state callback: powers the MFG block off when the system
+ * transitions ON -> OFF. All other transitions are no-ops here (power-on
+ * happens in the post callback). Always returns PVRSRV_OK.
+ * NOTE(review): ePwrFlags is printed with %08x — assumes the flag type is
+ * a 32-bit integer; confirm against PVRSRV_POWER_FLAGS.
+ */
+static PVRSRV_ERROR MTKSysDevPrePowerState(
+ IMG_HANDLE hSysData,
+ PVRSRV_SYS_POWER_STATE eNewPowerState,
+ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+ PVRSRV_POWER_FLAGS ePwrFlags)
+{
+ struct mtk_mfg *mfg = hSysData;
+
+ mtk_mfg_debug("MTKSysDevPrePowerState (%d->%d), bPwrFlags = 0x%08x\n",
+ eCurrentPowerState, eNewPowerState, ePwrFlags);
+
+ mutex_lock(&mfg->set_power_state);
+
+ if ((PVRSRV_SYS_POWER_STATE_OFF == eNewPowerState) &&
+ (PVRSRV_SYS_POWER_STATE_ON == eCurrentPowerState))
+ mtk_mfg_disable(mfg);
+
+ mutex_unlock(&mfg->set_power_state);
+ return PVRSRV_OK;
+}
+
+/* DDK post-power-state callback: powers the MFG block on for the
+ * OFF -> ON transition. Returns PVRSRV_OK, or
+ * PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE if mtk_mfg_enable() fails.
+ */
+static PVRSRV_ERROR MTKSysDevPostPowerState(
+ IMG_HANDLE hSysData,
+ PVRSRV_SYS_POWER_STATE eNewPowerState,
+ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+ PVRSRV_POWER_FLAGS ePwrFlags)
+{
+ struct mtk_mfg *mfg = hSysData;
+ PVRSRV_ERROR ret;
+
+ mtk_mfg_debug("MTKSysDevPostPowerState (%d->%d)\n",
+ eCurrentPowerState, eNewPowerState);
+
+ mutex_lock(&mfg->set_power_state);
+
+ if ((PVRSRV_SYS_POWER_STATE_ON == eNewPowerState) &&
+ (PVRSRV_SYS_POWER_STATE_OFF == eCurrentPowerState)) {
+ if (mtk_mfg_enable(mfg)) {
+ ret = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+ goto done;
+ }
+ }
+
+ ret = PVRSRV_OK;
+done:
+ mutex_unlock(&mfg->set_power_state);
+
+ return ret;
+}
+
+#ifdef SUPPORT_LINUX_DVFS
+#if defined(CONFIG_DEVFREQ_THERMAL)
+
+/* Used when the thermal zone cannot supply a temperature (milli-degC). */
+#define FALLBACK_STATIC_TEMPERATURE 65000
+
+/* Temperatures on power over-temp-and-voltage curve (C) */
+static const int vt_temperatures[] = { 25, 45, 65, 85, 105 };
+
+/* Voltages on power over-temp-and-voltage curve (mV) */
+static const int vt_voltages[] = { 900, 1000, 1130 };
+
+#define POWER_TABLE_NUM_TEMP ARRAY_SIZE(vt_temperatures)
+#define POWER_TABLE_NUM_VOLT ARRAY_SIZE(vt_voltages)
+
+/* Static leakage power (uW) indexed by [voltage][temperature]; values
+ * are presumably MT8173 characterisation data — no source given. */
+static const unsigned int
+power_table[POWER_TABLE_NUM_VOLT][POWER_TABLE_NUM_TEMP] = {
+ /* 25 45 65 85 105 */
+ { 14540, 35490, 60420, 120690, 230000 }, /* 900 mV */
+ { 21570, 41910, 82380, 159140, 298620 }, /* 1000 mV */
+ { 32320, 72950, 111320, 209290, 382700 }, /* 1130 mV */
+};
+
+/** Frequency and Power in Khz and mW respectively */
+static const int f_range[] = {253500, 299000, 396500, 455000, 494000, 598000};
+static const IMG_UINT32 max_dynamic_power[] = {612, 722, 957, 1100, 1194, 1445};
+
+/* Piecewise-linear interpolation of y(value) over the sorted breakpoints
+ * x[0..len-1]. Values outside the table are clamped to the end points.
+ * NOTE(review): ret is a signed int holding an unsigned result before the
+ * implicit conversion to u32 — harmless for these table magnitudes, but
+ * worth confirming if the tables ever grow.
+ */
+static u32 interpolate(int value, const int *x, const unsigned int *y, int len)
+{
+ u64 tmp64;
+ u32 dx;
+ u32 dy;
+ int i, ret;
+
+ /* Clamp below/above the table range. */
+ if (value <= x[0])
+ return y[0];
+ if (value >= x[len - 1])
+ return y[len - 1];
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == x[i])
+ return y[i];
+ if (value < x[i])
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = y[i] - y[i - 1];
+ dx = x[i] - x[i - 1];
+
+ tmp64 = value - x[i - 1];
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = y[i - 1] + tmp64;
+
+ return ret;
+}
+
+/* devfreq_cooling static-power model: estimate leakage (mW) at @voltage
+ * using the current CPU thermal-zone temperature and bilinear
+ * interpolation over power_table[]. Returns 0 if no thermal zone was
+ * found at probe time.
+ * NOTE(review): do_div() is applied to `temperature`, which is an int on
+ * newer kernels — do_div expects a u64 lvalue; confirm this builds
+ * cleanly on the targeted kernels/architectures.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long mtk_mfg_get_static_power(struct devfreq *df,
+ unsigned long voltage)
+#else
+static unsigned long mtk_mfg_get_static_power(unsigned long voltage)
+#endif
+{
+ struct mtk_mfg *mfg = ghSysData;
+ struct thermal_zone_device *tz = mfg->tz;
+ unsigned long power;
+#if !defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+ unsigned long temperature = FALLBACK_STATIC_TEMPERATURE;
+#else
+ int temperature = FALLBACK_STATIC_TEMPERATURE;
+#endif
+ int low_idx = 0, high_idx = POWER_TABLE_NUM_VOLT - 1;
+ int i;
+
+ if (!tz)
+ return 0;
+
+ /* On read failure, fall back to FALLBACK_STATIC_TEMPERATURE. */
+ if (tz->ops->get_temp(tz, &temperature))
+ dev_warn(mfg->dev, "Failed to read temperature\n");
+ do_div(temperature, 1000);
+
+ /* Bracket @voltage between the two nearest table voltages. */
+ for (i = 0; i < POWER_TABLE_NUM_VOLT; i++) {
+ if (voltage <= vt_voltages[POWER_TABLE_NUM_VOLT - 1 - i])
+ high_idx = POWER_TABLE_NUM_VOLT - 1 - i;
+
+ if (voltage >= vt_voltages[i])
+ low_idx = i;
+ }
+
+ if (low_idx == high_idx) {
+ power = interpolate(temperature,
+ vt_temperatures,
+ &power_table[low_idx][0],
+ POWER_TABLE_NUM_TEMP);
+ } else {
+ /* Interpolate over temperature at both bracketing voltages,
+ * then linearly between the two voltage rows. */
+ unsigned long dvt =
+ vt_voltages[high_idx] - vt_voltages[low_idx];
+ unsigned long power1, power2;
+
+ power1 = interpolate(temperature,
+ vt_temperatures,
+ &power_table[high_idx][0],
+ POWER_TABLE_NUM_TEMP);
+
+ power2 = interpolate(temperature,
+ vt_temperatures,
+ &power_table[low_idx][0],
+ POWER_TABLE_NUM_TEMP);
+
+ power = (power1 - power2) * (voltage - vt_voltages[low_idx]);
+ do_div(power, dvt);
+ power += power2;
+ }
+
+ /* convert to mw */
+ do_div(power, 1000);
+
+ mtk_mfg_debug("mtk_mfg_get_static_power: %lu at Temperature %d\n",
+ power, temperature);
+ return power;
+}
+
+/* devfreq_cooling dynamic-power model: interpolate max_dynamic_power[]
+ * over @freq (kHz), then scale by voltage^2 (V^2 * mW, normalised by
+ * 10^6 since voltage is in mV).
+ * NOTE(review): do_div() is applied to `power`, an IMG_UINT32 — do_div
+ * expects a u64 lvalue; confirm this builds on the targeted kernels.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+static unsigned long mtk_mfg_get_dynamic_power(struct devfreq *df,
+ unsigned long freq,
+ unsigned long voltage)
+#else
+static unsigned long mtk_mfg_get_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+#endif
+{
+ #define NUM_RANGE ARRAY_SIZE(f_range)
+ /** Frequency and Power in Khz and mW respectively */
+ IMG_INT32 i, low_idx = 0, high_idx = NUM_RANGE - 1;
+ IMG_UINT32 power;
+
+ /* Bracket @freq between the two nearest table frequencies. */
+ for (i = 0; i < NUM_RANGE; i++) {
+ if (freq <= f_range[NUM_RANGE - 1 - i])
+ high_idx = NUM_RANGE - 1 - i;
+
+ if (freq >= f_range[i])
+ low_idx = i;
+ }
+
+ if (low_idx == high_idx) {
+ power = max_dynamic_power[low_idx];
+ } else {
+ IMG_UINT32 f_interval = f_range[high_idx] - f_range[low_idx];
+ IMG_UINT32 p_interval = max_dynamic_power[high_idx] -
+ max_dynamic_power[low_idx];
+
+ power = p_interval * (freq - f_range[low_idx]);
+ do_div(power, f_interval);
+ power += max_dynamic_power[low_idx];
+ }
+
+ /* Scale by voltage squared (mV^2 -> V^2 via the 10^6 divisor). */
+ power = (IMG_UINT32)div_u64((IMG_UINT64)power * voltage * voltage,
+ 1000000UL);
+
+ return power;
+ #undef NUM_RANGE
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
+/* Kernels >= 5.11 dropped the separate static/dynamic callbacks from
+ * devfreq_cooling_power; expose a single combined estimate instead. */
+static int mtk_mfg_get_real_power(struct devfreq *df,
+ u32 *power,
+ unsigned long freq,
+ unsigned long voltage)
+{
+ if (!df || !power)
+ return -EINVAL;
+
+ *power = mtk_mfg_get_static_power(df, voltage) +
+ mtk_mfg_get_dynamic_power(df, freq, voltage);
+
+ return 0;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */
+
+/* Power model handed to devfreq_cooling; the callback set depends on the
+ * kernel's devfreq_cooling_power API version (split vs combined). */
+static struct devfreq_cooling_power sPowerOps = {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
+ .get_static_power = mtk_mfg_get_static_power,
+ .get_dynamic_power = mtk_mfg_get_dynamic_power,
+#else
+ .get_real_power = mtk_mfg_get_real_power,
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */
+};
+#endif
+
+/* DDK DVFS callback: forward a GPU frequency request to the MFG layer. */
+static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 freq)
+{
+ struct mtk_mfg *mfg = hSysData;
+
+ /* freq is in Hz */
+ mtk_mfg_freq_set(mfg, freq);
+}
+
+/* DDK DVFS callback: forward a GPU voltage request (uV) to the MFG layer. */
+static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 volt)
+{
+ struct mtk_mfg *mfg = hSysData;
+
+ mtk_mfg_volt_set(mfg, volt);
+}
+#endif
+
+/* Build the PVRSRV_DEVICE_CONFIG for the MT8173 GPU. The config, RGX
+ * data, timing info and phys-heap config are carved out of ONE
+ * allocation (freed together in DeviceConfigDestroy()). Also stashes
+ * @mfg as hSysData and in the file-global ghSysData for the devfreq
+ * power model. Returns PVRSRV_OK or PVRSRV_ERROR_OUT_OF_MEMORY.
+ */
+static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice,
+ struct mtk_mfg *mfg,
+ PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo) +
+ sizeof(*psPhysHeapConfig));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Slice the single allocation into its four sub-structures. */
+ psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+ psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+ psPhysHeapConfig = (PHYS_HEAP_CONFIG *)((IMG_CHAR *)psRGXTimingInfo + sizeof(*psRGXTimingInfo));
+
+ /* Set up the RGX timing information */
+ psRGXTimingInfo->ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED;
+ psRGXTimingInfo->bEnableActivePM = IMG_TRUE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_TRUE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Single UMA heap with identity CPU<->device address translation. */
+ psPhysHeapConfig->pszPDumpMemspaceName = "SYSMEM";
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+ psPhysHeapConfig->psMemFuncs = &gsPhysHeapFuncs;
+ psPhysHeapConfig->hPrivData = NULL;
+ psPhysHeapConfig->ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+
+ psDevConfig->pasPhysHeaps = psPhysHeapConfig;
+ psDevConfig->ui32PhysHeapCount = 1U;
+
+ psDevConfig->pvOSDevice = pvOSDevice;
+ psDevConfig->pszName = MT8173_SYSTEM_NAME;
+ psDevConfig->pszVersion = NULL;
+
+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+
+ psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
+ psDevConfig->bDevicePA0IsValid = IMG_FALSE;
+
+ psDevConfig->hDevData = psRGXData;
+ psDevConfig->hSysData = (IMG_HANDLE) mfg;
+ /* devfreq power-model callbacks have no priv argument; see ghSysData. */
+ ghSysData = psDevConfig->hSysData;
+
+ psDevConfig->pfnSysDevFeatureDepInit = NULL;
+
+ psDevConfig->ui32IRQ = mfg->rgx_irq;
+
+ psDevConfig->sRegsCpuPBase.uiAddr = mfg->rgx_start;
+ psDevConfig->ui32RegsSize = mfg->rgx_size;
+
+#ifdef SUPPORT_LINUX_DVFS
+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = MTK_DVFS_SWITCH_INTERVAL;
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ psDevConfig->sDVFS.sDVFSDeviceCfg.psPowerOps = &sPowerOps;
+#endif
+
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ /* power management on HW system */
+ psDevConfig->pfnPrePowerState = MTKSysDevPrePowerState;
+ psDevConfig->pfnPostPowerState = MTKSysDevPostPowerState;
+
+ /* clock frequency */
+ psDevConfig->pfnClockFreqGet = NULL;
+
+ /* device error notify callback function */
+ psDevConfig->pfnSysDevErrorNotify = NULL;
+
+ *ppsDevConfigOut = psDevConfig;
+
+ return PVRSRV_OK;
+}
+
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ /*
+ * The device config, RGX data and RGX timing info are part of the same
+ * allocation so do only one free.
+ */
+ OSFreeMem(psDevConfig);
+}
+
+/* System-layer entry point: create the MFG context, set the DMA mask and
+ * build the device config. -EPROBE_DEFER from the MFG layer is mapped to
+ * PVRSRV_ERROR_PROBE_DEFER so the DDK can retry the probe.
+ * NOTE(review): the dma_set_mask() return value is ignored, and the
+ * 33-bit mask is presumably an SoC addressing property — confirm both.
+ */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ struct device *dev = pvOSDevice;
+ struct mtk_mfg *mfg;
+ PVRSRV_ERROR eError;
+
+ mfg = mtk_mfg_create(dev);
+ if (IS_ERR(mfg)) {
+ if (PTR_ERR(mfg) == -EPROBE_DEFER)
+ return PVRSRV_ERROR_PROBE_DEFER;
+ else
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ dma_set_mask(dev, DMA_BIT_MASK(33));
+
+ eError = DeviceConfigCreate(pvOSDevice, mfg, &psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ mtk_mfg_destroy(mfg);
+
+ return eError;
+ }
+
+ *ppsDevConfig = psDevConfig;
+
+#if defined(SUPPORT_ION)
+ IonInit(NULL);
+#endif
+
+ return PVRSRV_OK;
+}
+
+/* Tear down SysDevInit() in reverse: ION, device config, MFG context.
+ * The mfg pointer is read out before the config (which owns hSysData)
+ * is freed. */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ struct mtk_mfg *mfg = psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION)
+ IonDeinit();
+#endif
+
+ DeviceConfigDestroy(psDevConfig);
+
+ mtk_mfg_destroy(mfg);
+}
+
+/* Register the DDK LISR for @ui32IRQ via request_irq(), wrapped by
+ * MTKLISRWrapper. The wrapper data is owned by the returned handle and
+ * freed in SysUninstallDeviceLISR().
+ * NOTE(review): IRQF_TRIGGER_LOW hard-codes level-low triggering,
+ * presumably matching the device tree — confirm against the DT node.
+ */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ LISR_WRAPPER_DATA *psWrapperData;
+
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ psWrapperData = OSAllocMem(sizeof(*psWrapperData));
+ if (!psWrapperData)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psWrapperData->ui32IRQ = ui32IRQ;
+ psWrapperData->pfnLISR = pfnLISR;
+ psWrapperData->pvLISRData = pvData;
+
+ if (request_irq(ui32IRQ, MTKLISRWrapper, IRQF_TRIGGER_LOW, pszName,
+ psWrapperData))
+ {
+ OSFreeMem(psWrapperData);
+
+ return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+ }
+
+ *phLISRData = (IMG_HANDLE) psWrapperData;
+
+ return PVRSRV_OK;
+}
+
+/* Unregister a LISR installed by SysInstallDeviceLISR() and free its
+ * wrapper data. Always returns PVRSRV_OK. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ LISR_WRAPPER_DATA *psWrapperData = hLISRData;
+
+ free_irq(psWrapperData->ui32IRQ, psWrapperData);
+
+ OSFreeMem(psWrapperData);
+
+ return PVRSRV_OK;
+}
+
+/* No system-specific debug state to dump on this platform; required by
+ * the system-layer interface, so provide an empty implementation. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+
+
+/*!< System specific poll/timeout details */
+/* Maximum time to poll/wait on hardware operations, in microseconds. */
+#define MAX_HW_TIME_US (1000000)
+/* Devices-watchdog sleep interval while powered on (presumably ms). */
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)
+/* Devices-watchdog sleep interval while powered off (presumably ms; 1 hour). */
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+/* Retry count for polled waits. */
+#define WAIT_TRY_COUNT (20000)
+
+/* Device-tree "compatible" string this system layer binds to.
+ * NOTE(review): names the MediaTek mt8173 GPU while the rest of this
+ * system layer targets the IMG test chip (TC) -- confirm intended. */
+#define SYS_RGX_OF_COMPATIBLE "mediatek,mt8173-gpu"
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+# Objects making up the kernel services module for this TC system layer.
+$(PVRSRVKM_NAME)-y += \
+ services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/sysconfig_cmn.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/rogue/common/vmm_type_$(VMM_TYPE).o
+
+# Include paths for the TC system layer.
+# (The env/linux path was previously listed twice on one line.)
+ccflags-y += \
+ -I$(TOP)/services/system/rogue/common/env/linux \
+ -I$(TOP)/kernel/drivers/staging/imgtec \
+ -I$(TOP)/kernel/drivers/staging/imgtec/tc \
+ -I$(TOP)/include/rogue/system/rgx_tc -I$(TOP)/include/system/rgx_tc
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "sysinfo.h"
+#include "apollo_regs.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#include "ion_sys.h"
+#endif
+
+#include "tc_drv.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+/* GPU (device) physical base address of local memory when an Odin board
+ * runs in hybrid memory mode (see TCHybrid*PAddr* translators below).
+ * ULL suffix makes the >32-bit width explicit. */
+#define ODIN_MEMORY_HYBRID_DEVICE_BASE 0x400000000ULL
+
+/* Active power-management latency reported in the RGX timing info, in ms. */
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+/* True when the top 32 bits of a 64-bit value are zero.
+ * Argument is parenthesised so expressions expand safely (CERT PRE01-C). */
+#define UI64_TOPWORD_IS_ZERO(ui64) (((ui64) >> 32) == 0)
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+
+/* Fake DVFS configuration used purely for testing purposes */
+
+/* Fake operating performance points for DVFS testing; entries are
+ * presumably { voltage, frequency } pairs -- confirm against IMG_OPP. */
+static const IMG_OPP asOPPTable[] =
+{
+ { 8, 25000000},
+ { 16, 50000000},
+ { 32, 75000000},
+ { 64, 100000000},
+};
+
+/* Number of entries in asOPPTable. */
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+
+/* DVFS frequency-change callback stub: logs the requested frequency only;
+ * no hardware is touched in this test configuration. */
+static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
+}
+
+/* DVFS voltage-change callback stub: logs the requested voltage only. */
+static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
+}
+
+#endif
+
+/* Forward declarations for the card-local (LMA) address translators. */
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translators for heaps in card-local memory: device address is the CPU
+ * address offset by the TC memory base. */
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
+};
+
+/* Forward declarations for the host (UMA) address translators. */
+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translators for heaps in host memory: the mapping is the identity. */
+static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr,
+};
+
+/* Forward declarations for the Odin hybrid-mode address translators. */
+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translators for Odin hybrid mode: device addresses live in a window
+ * based at ODIN_MEMORY_HYBRID_DEVICE_BASE. */
+static PHYS_HEAP_FUNCTIONS gsHybridPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCHybridCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCHybridDevPAddrToCpuPAddr
+};
+
+typedef struct _SYS_DATA_ SYS_DATA;
+
+/* Per-device system-layer state, attached to the device config's hSysData. */
+struct _SYS_DATA_
+{
+ /* Platform device this system layer was probed for. */
+ struct platform_device *pdev;
+
+ /* Board data supplied by the parent TC driver (memory bases, mode...). */
+ struct tc_rogue_platform_data *pdata;
+
+ /* GPU register resource (base/size) of the platform device. */
+ struct resource *registers;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ /* Legacy (pre-4.12) ION client and the whole-heap allocation handed
+ * over to Services (see IonInit/IonDeinit). */
+ struct ion_client *ion_client;
+ struct ion_handle *ion_rogue_allocation;
+#endif
+
+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__)
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+ /* Iterator over the GPU_LOCAL heap plus the staging buffer used to
+ * preserve LMA contents across suspend (see PrePower/PostPower). */
+ PHYS_HEAP_ITERATOR *psHeapIter;
+ void *pvS3Buffer;
+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */
+};
+
+/* Format and per-substring size limits for GetDeviceVersionString(). */
+#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s"
+#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
+#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
+#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */
+#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */
+#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */
+
+/* Build a human-readable version string from the board revision strings
+ * queried off the parent TC device. Returns an OSAllocMem'd string the
+ * caller must free, or NULL if the query or the allocation fails. */
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+ int err;
+ char str_fpga_rev[FPGA_REV_MAX_LEN]={0};
+ char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0};
+ char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0};
+ char str_pci_ver[PCI_VERSION_MAX_LEN]={0};
+ char str_macro_ver[MACRO_VERSION_MAX_LEN]={0};
+
+ IMG_CHAR *pszVersion;
+ IMG_UINT32 ui32StringLength;
+
+ err = tc_sys_strings(psSysData->pdev->dev.parent,
+ str_fpga_rev, sizeof(str_fpga_rev),
+ str_tcf_core_rev, sizeof(str_tcf_core_rev),
+ str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id),
+ str_pci_ver, sizeof(str_pci_ver),
+ str_macro_ver, sizeof(str_macro_ver));
+ if (err)
+ {
+ return NULL;
+ }
+
+ /* Calculate how much space we need to allocate for the string.
+ * The five "%s" specifiers (10 chars) are counted in the format length
+ * but replaced by the substrings, so the total also covers the NUL
+ * terminator; OSSNPrintf bounds the write in any case. */
+ ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING);
+ ui32StringLength += OSStringLength(str_fpga_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_target_build_id);
+ ui32StringLength += OSStringLength(str_pci_ver);
+ ui32StringLength += OSStringLength(str_macro_ver);
+
+ /* Create the version string */
+ pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR));
+ if (pszVersion)
+ {
+ OSSNPrintf(&pszVersion[0], ui32StringLength,
+ SYSTEM_INFO_FORMAT_STRING,
+ str_fpga_rev,
+ str_tcf_core_rev,
+ str_tcf_core_target_build_id,
+ str_pci_ver,
+ str_macro_ver);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__));
+ }
+
+ return pszVersion;
+}
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+/* Module-scope handle so IonDeinit/IonDevAcquire can reach the state set
+ * up by IonInit (only one TC device is supported by this path). */
+static SYS_DATA *gpsIonPrivateData;
+
+/* Create an ION client and grab the entire rogue ION heap as one
+ * allocation that Services then sub-manages. Returns PVRSRV_OK or an
+ * ION-specific error with everything rolled back. */
+PVRSRV_ERROR IonInit(void *pvPrivateData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYS_DATA *psSysData = pvPrivateData;
+ gpsIonPrivateData = psSysData;
+
+ psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME);
+ if (IS_ERR(psSysData->ion_client))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client)));
+ eError = PVRSRV_ERROR_ION_NO_CLIENT;
+ goto err_out;
+ }
+ /* Allocate the whole rogue ion heap and pass that to services to manage */
+ psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0);
+ if (IS_ERR(psSysData->ion_rogue_allocation))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation)));
+ eError = PVRSRV_ERROR_ION_FAILED_TO_ALLOC;
+ goto err_destroy_client;
+
+ }
+
+ return PVRSRV_OK;
+err_destroy_client:
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+err_out:
+ return eError;
+}
+
+/* Tear down the allocation and client created by IonInit (reverse order). */
+void IonDeinit(void)
+{
+ SYS_DATA *psSysData = gpsIonPrivateData;
+ ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation);
+ psSysData->ion_rogue_allocation = NULL;
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+}
+
+/* Hand out the board's ION device; no reference counting is performed. */
+struct ion_device *IonDevAcquire(void)
+{
+ return gpsIonPrivateData->pdata->ion_device;
+}
+
+/* Matching release: only sanity-checks that the same device comes back. */
+void IonDevRelease(struct ion_device *ion_device)
+{
+ PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device);
+}
+#endif /* defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+/* Card-local (LMA) translation: device physical address is the CPU
+ * physical address with the TC memory base subtracted. */
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32NumOfAddr; i++)
+ {
+ psDevPAddr[i].uiAddr = psCpuPAddr[i].uiAddr - psSysData->pdata->tc_memory_base;
+ }
+}
+
+/* Card-local (LMA) translation: CPU physical address is the device
+ * physical address with the TC memory base added back. */
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32NumOfAddr; i++)
+ {
+ psCpuPAddr[i].uiAddr = psDevPAddr[i].uiAddr + psSysData->pdata->tc_memory_base;
+ }
+}
+
+/* Host (UMA) translation: the mapping is the identity. When the two
+ * address types have the same size a bulk copy is used; otherwise each
+ * element is widened individually (32-bit host case).
+ * Assumes uiNumOfAddr >= 1, matching all call sites in this file. */
+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 uiNumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ /* Consistency with the other callbacks: private data is not needed. */
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ if (sizeof(*psDevPAddr) == sizeof(*psCpuPAddr))
+ {
+ OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr));
+ return;
+ }
+
+ /* In this case we may have a 32bit host, so we can't do a memcpy */
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (uiNumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/* Host (UMA) reverse translation (identity). On a 32-bit host each 64-bit
+ * device address is narrowed; asserts verify no high bits are dropped.
+ * Assumes uiNumOfAddr >= 1, matching all call sites in this file. */
+static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 uiNumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ /* Consistency with the other callbacks: private data is not needed. */
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ if (sizeof(*psCpuPAddr) == sizeof(*psDevPAddr))
+ {
+ OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr));
+ return;
+ }
+
+ /* In this case we may have a 32bit host, so we can't do a memcpy.
+ * Check we are not dropping any data from the 64bit dev addr */
+ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[0].uiAddr));
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
+ if (uiNumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx)
+ {
+ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[ui32Idx].uiAddr));
+ psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
+ }
+ }
+}
+
+/* Odin hybrid-mode translation: device addresses occupy a window based
+ * at ODIN_MEMORY_HYBRID_DEVICE_BASE, offset by the position within the
+ * TC memory region. */
+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ IMG_UINT32 ui32Idx;
+
+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++)
+ {
+ psDevPAddr[ui32Idx].uiAddr =
+ (psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base) +
+ ODIN_MEMORY_HYBRID_DEVICE_BASE;
+ }
+}
+
+/* Inverse of TCHybridCpuPAddrToDevPAddr. */
+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ IMG_UINT32 ui32Idx;
+
+ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++)
+ {
+ psCpuPAddr[ui32Idx].uiAddr =
+ (psDevPAddr[ui32Idx].uiAddr - ODIN_MEMORY_HYBRID_DEVICE_BASE) +
+ psSysData->pdata->tc_memory_base;
+ }
+}
+
+/* Populate one LMA phys-heap config entry. Cannot fail; returns
+ * PVRSRV_OK so call sites can share the error-checking pattern. */
+static PVRSRV_ERROR
+InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap,
+ IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr,
+ IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs,
+ PHYS_HEAP_USAGE_FLAGS ui32Flags)
+{
+ /* Fixed heap attributes first... */
+ psPhysHeap->eType = PHYS_HEAP_TYPE_LMA;
+ psPhysHeap->pszPDumpMemspaceName = "LMA";
+ psPhysHeap->psMemFuncs = psFuncs;
+ psPhysHeap->ui32UsageFlags = ui32Flags;
+
+ /* ...then the caller-supplied geometry. */
+ psPhysHeap->sCardBase.uiAddr = uiBaseAddr;
+ psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr);
+ psPhysHeap->uiSize = uiSize;
+
+ return PVRSRV_OK;
+}
+
+/* Carve the card-local memory into phys heaps: an optional GPU_PRIVATE
+ * heap (when SysRestrictGpuLocalPhysheap shrinks the mappable region),
+ * the main GPU_LOCAL heap, a firmware carveout, and (when display memory
+ * is configured) an external display heap. Appends to pasPhysHeaps and
+ * advances *pui32PhysHeapCount for each heap created. */
+static PVRSRV_ERROR
+InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount)
+{
+ struct tc_rogue_platform_data *pdata = psSysData->pdata;
+ PHYS_HEAP_FUNCTIONS *psHeapFuncs;
+ IMG_UINT64 uiCardBase;
+ IMG_UINT64 uiCpuBase = pdata->rogue_heap_memory_base;
+ IMG_UINT64 uiOrigHeapSize = pdata->rogue_heap_memory_size;
+ IMG_UINT64 uiHeapSize = 0;
+ PVRSRV_ERROR eError;
+ IMG_UINT64 uiFwCarveoutSize;
+
+ /* Choose the address translators and the device-side base to match
+ * the board's memory mode (Odin hybrid, generic hybrid, or local). */
+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN &&
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ psHeapFuncs = &gsHybridPhysHeapFuncs;
+ uiCardBase = ODIN_MEMORY_HYBRID_DEVICE_BASE;
+ }
+ else if (pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ psHeapFuncs = &gsHostPhysHeapFuncs;
+ uiCardBase = pdata->tc_memory_base;
+ }
+ else
+ {
+ psHeapFuncs = &gsLocalPhysHeapFuncs;
+ uiCardBase = 0;
+ }
+
+#if defined(SUPPORT_AUTOVZ)
+ /* Carve out enough LMA memory to hold the heaps of
+ * all supported OSIDs and the FW page tables */
+ uiFwCarveoutSize = (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE) +
+ RGX_FIRMWARE_MAX_PAGETABLE_SIZE;
+#elif defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ /* Carve out enough LMA memory to hold the heaps of all supported OSIDs */
+ uiFwCarveoutSize = (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+#else
+ /* Create a memory carveout just for the Host's Firmware heap.
+ * Guests will allocate their own physical memory. */
+ uiFwCarveoutSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+#endif
+
+ uiHeapSize = SysRestrictGpuLocalPhysheap(uiOrigHeapSize);
+
+ if (uiOrigHeapSize != uiHeapSize)
+ {
+ /* The remainder (minus the FW carveout) becomes a non-mappable
+ * GPU_PRIVATE heap.
+ * NOTE(review): assumes uiOrigHeapSize >= uiHeapSize +
+ * uiFwCarveoutSize; underflows otherwise -- confirm the
+ * restriction helper guarantees this. */
+ IMG_UINT64 uiPrivHeapSize = uiOrigHeapSize - (uiHeapSize + uiFwCarveoutSize);
+ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++],
+ uiCardBase,
+ 0,
+ uiPrivHeapSize,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_GPU_PRIVATE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ uiCardBase += uiPrivHeapSize;
+ uiCpuBase += uiPrivHeapSize;
+ }
+ else
+ {
+ /* No private heap: the FW carveout is taken off the main heap. */
+ uiHeapSize -= uiFwCarveoutSize;
+ }
+
+ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++],
+ uiCardBase,
+ uiCpuBase,
+ uiHeapSize,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_GPU_LOCAL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ uiCardBase += uiHeapSize;
+ uiCpuBase += uiHeapSize;
+
+ /* allocate the Host Driver's Firmware Heap from the reserved carveout */
+ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++],
+ uiCardBase,
+ uiCpuBase,
+ uiFwCarveoutSize,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_FW_SHARED);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if TC_DISPLAY_MEM_SIZE != 0
+ /* Display heap sits after the FW carveout on the card, but uses the
+ * PDP heap base/size from the platform data on the CPU side. */
+ uiCardBase += uiFwCarveoutSize;
+ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++],
+ uiCardBase,
+ pdata->pdp_heap_memory_base,
+ pdata->pdp_heap_memory_size,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/* Append a UMA (system-memory) CPU_LOCAL heap for any memory mode other
+ * than local-only. Cannot fail; always returns PVRSRV_OK. */
+static PVRSRV_ERROR
+InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount)
+{
+ PHYS_HEAP_CONFIG *psHeap;
+
+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL)
+ {
+ /* Local-only mode: no host heap required. */
+ return PVRSRV_OK;
+ }
+
+ psHeap = &pasPhysHeaps[*pui32PhysHeapCount];
+ psHeap->eType = PHYS_HEAP_TYPE_UMA;
+ psHeap->pszPDumpMemspaceName = "SYSMEM";
+ psHeap->psMemFuncs = &gsHostPhysHeapFuncs;
+ psHeap->ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL;
+
+ (*pui32PhysHeapCount)++;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "Initialising CPU_LOCAL UMA Host PhysHeaps with memory mode: %d",
+ psSysData->pdata->mem_mode));
+
+ return PVRSRV_OK;
+}
+
+/* Build the complete phys-heap list (local heaps, then the optional host
+ * heap) and attach the shared private-data pointer to every entry. */
+static PVRSRV_ERROR
+PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps,
+ void *pvPrivData, IMG_UINT32 *pui32PhysHeapCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32HeapIdx;
+
+ eError = InitLocalHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = InitHostHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* Initialise fields that don't change between memory modes.
+ * Fix up heap IDs. This is needed for multi-testchip systems to
+ * ensure the heap IDs are unique as this is what Services expects. */
+ for (ui32HeapIdx = 0; ui32HeapIdx < *pui32PhysHeapCount; ui32HeapIdx++)
+ {
+ pasPhysHeaps[ui32HeapIdx].hPrivData = pvPrivData;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* Work out how many phys heaps the current memory mode needs, allocate
+ * the config array, and populate it via PhysHeapsInit. On success the
+ * caller owns *ppasPhysHeapsOut (freed by DeviceConfigDestroy). The
+ * count computed here must stay in step with the heaps appended by
+ * InitLocalHeaps/InitHostHeaps; the PVR_ASSERT below checks that. */
+static PVRSRV_ERROR
+PhysHeapsCreate(const SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+ IMG_UINT32 *puiPhysHeapCountOut)
+{
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 ui32NumPhysHeaps;
+ IMG_UINT32 ui32PhysHeapCount = 0;
+ PVRSRV_ERROR eError;
+
+ /* Base count: GPU_LOCAL only, plus the host UMA heap in hybrid mode. */
+ switch (psSysData->pdata->mem_mode)
+ {
+ case TC_MEMORY_LOCAL: ui32NumPhysHeaps = 1U; break;
+ case TC_MEMORY_HYBRID: ui32NumPhysHeaps = 2U; break;
+ default:
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unsupported memory mode %d", __func__, psSysData->pdata->mem_mode));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+ }
+
+#if TC_DISPLAY_MEM_SIZE != 0
+ /* One extra heap for the display (PDP) memory. */
+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL ||
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ ui32NumPhysHeaps += 1U;
+ }
+#endif
+
+ /* dedicated LMA heap for Firmware */
+ ui32NumPhysHeaps += 1U;
+
+ /* One more for the GPU_PRIVATE heap when the mappable region is
+ * restricted (see InitLocalHeaps). */
+ if (SysRestrictGpuLocalAddPrivateHeap())
+ {
+ ui32NumPhysHeaps++;
+ psDevConfig->bHasNonMappableLocalMemory = IMG_TRUE;
+ }
+ else
+ {
+ psDevConfig->bHasNonMappableLocalMemory = IMG_FALSE;
+ }
+
+ pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * ui32NumPhysHeaps);
+ if (!pasPhysHeaps)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = PhysHeapsInit(psSysData, pasPhysHeaps, psDevConfig, &ui32PhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(pasPhysHeaps);
+ return eError;
+ }
+
+ /* The predicted and the actually-populated counts must agree. */
+ PVR_ASSERT(ui32PhysHeapCount == ui32NumPhysHeaps);
+
+ *ppasPhysHeapsOut = pasPhysHeaps;
+ *puiPhysHeapCountOut = ui32PhysHeapCount;
+
+ return PVRSRV_OK;
+}
+
+/* Free a device config created by DeviceConfigCreate, including its
+ * version string and phys-heap array. */
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ /* pszVersion is NULL when GetDeviceVersionString failed. */
+ if (psDevConfig->pszVersion)
+ {
+ OSFreeMem(psDevConfig->pszVersion);
+ }
+
+ OSFreeMem(psDevConfig->pasPhysHeaps);
+
+ OSFreeMem(psDevConfig);
+}
+
+/* Convert device physical addresses to DMA addresses for slave-DMA
+ * transfers. On Odin the DMA address space is the CPU physical space,
+ * so each address goes through TCLocalDevPAddrToCpuPAddr. Invalid pages
+ * of a sparse allocation get an all-ones marker. */
+static void odinTCDevPhysAddr2DmaAddr(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_DMA_ADDR *psDmaAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL *pbValid,
+ IMG_UINT32 ui32NumAddr,
+ IMG_BOOL bSparseAlloc)
+{
+ IMG_CPU_PHYADDR sCpuPAddr = {0};
+ IMG_UINT32 ui32Idx;
+
+ /* Fast path.
+ * NOTE(review): only the first/base address is translated here, which
+ * assumes non-sparse allocations are physically contiguous -- confirm
+ * against the callers' expectations. */
+ if (!bSparseAlloc)
+ {
+ /* In Odin, DMA address space is the same as host CPU */
+ TCLocalDevPAddrToCpuPAddr(psDevConfig,
+ 1,
+ &sCpuPAddr,
+ psDevPAddr);
+ psDmaAddr->uiAddr = sCpuPAddr.uiAddr;
+ }
+ else
+ {
+ /* Sparse: translate each valid page individually. */
+ for (ui32Idx = 0; ui32Idx < ui32NumAddr; ui32Idx++)
+ {
+ if (pbValid[ui32Idx])
+ {
+ TCLocalDevPAddrToCpuPAddr(psDevConfig,
+ 1,
+ &sCpuPAddr,
+ &psDevPAddr[ui32Idx]);
+ psDmaAddr[ui32Idx].uiAddr = sCpuPAddr.uiAddr;
+ }
+ else
+ {
+ /* Invalid DMA address marker */
+ psDmaAddr[ui32Idx].uiAddr = ~((IMG_UINT64)0x0);
+ }
+ }
+ }
+}
+
+/* Acquire a named slave DMA channel from the parent TC device. */
+static void * odinTCgetCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, char *name)
+{
+ struct device *psParent = ((struct device *) psDevConfig->pvOSDevice)->parent;
+
+ return tc_dma_chan(psParent, name);
+}
+
+/* Release a slave DMA channel obtained via odinTCgetCDMAChan. */
+static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ void* channel)
+{
+ struct device *psDev = (struct device *) psDevConfig->pvOSDevice;
+
+ tc_dma_chan_free(psDev->parent, (struct dma_chan *) channel);
+}
+
+/* Allocate and populate the PVRSRV_DEVICE_CONFIG for this TC device.
+ * The RGX data and timing info live in the same allocation, directly
+ * after the config. On success *ppsDevConfigOut is owned by the caller
+ * and released with DeviceConfigDestroy; on error nothing stays
+ * allocated. */
+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiPhysHeapCount;
+ PVRSRV_ERROR eError;
+
+ /* One zeroed allocation: config + RGX data + timing info. */
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRGXData = (RGX_DATA *) IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig));
+ psRGXTimingInfo = (RGX_TIMING_INFORMATION *) IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData));
+
+ eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeDevConfig;
+ }
+
+ /* Setup RGX specific timing data */
+#if defined(TC_APOLLO_BONNIE)
+ /* For BonnieTC there seems to be an additional 5x multiplier that occurs to the clock as measured speed is 540Mhz not 108Mhz. */
+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6 * 5;
+#elif defined(TC_APOLLO_ES2)
+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6;
+#else
+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) /
+ tc_core_clock_multiplex(&psSysData->pdev->dev);
+#endif
+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Setup the device config */
+ psDevConfig->pvOSDevice = &psSysData->pdev->dev;
+ psDevConfig->pszName = "tc";
+ /* May legitimately be NULL; DeviceConfigDestroy copes with that. */
+ psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+ psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+ psDevConfig->ui32RegsSize = resource_size(psSysData->registers);
+
+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN &&
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_CPU_LOCAL;
+ }
+ else
+ {
+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+ }
+
+ psDevConfig->ui32IRQ = TC_INTERRUPT_EXT;
+
+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+
+ psDevConfig->pasPhysHeaps = pasPhysHeaps;
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+ /* Only required for LMA but having this always set shouldn't be a problem */
+ psDevConfig->bDevicePA0IsValid = IMG_TRUE;
+
+ psDevConfig->hDevData = psRGXData;
+ psDevConfig->hSysData = psSysData;
+
+#if defined(SUPPORT_ALT_REGBASE)
+ if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: alternative GPU register base is "
+ "supported only in LMA mode", __func__));
+ /* Without setting eError this path used to report PVRSRV_OK for a
+ * freed config and leaked the phys heaps and version string;
+ * DeviceConfigDestroy releases everything allocated above. */
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ DeviceConfigDestroy(psDevConfig);
+ return eError;
+ }
+
+ /* Using display memory base as the alternative GPU register base,
+ * since the display memory range is not used by the firmware. */
+ TCLocalCpuPAddrToDevPAddr(psDevConfig, 1,
+ &psDevConfig->sAltRegsGpuPBase,
+ &pasPhysHeaps[PHY_HEAP_CARD_EXT].sStartAddr);
+#endif
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+ /* Fake DVFS configuration used purely for testing purposes */
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+#if defined(SUPPORT_LINUX_DVFS)
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ /* DMA channel config */
+ psDevConfig->pfnSlaveDMAGetChan = odinTCgetCDMAChan;
+ psDevConfig->pfnSlaveDMAFreeChan = odinTCFreeCDMAChan;
+ psDevConfig->pfnDevPhysAddr2DmaAddr = odinTCDevPhysAddr2DmaAddr;
+ psDevConfig->pszDmaTxChanName = psSysData->pdata->tc_dma_tx_chan_name;
+ psDevConfig->pszDmaRxChanName = psSysData->pdata->tc_dma_rx_chan_name;
+ psDevConfig->bHasDma = IMG_TRUE;
+ /* Following two values are expressed in number of bytes */
+ psDevConfig->ui32DmaTransferUnit = 1;
+ psDevConfig->ui32DmaAlignment = 1;
+
+ *ppsDevConfigOut = psDevConfig;
+
+ return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+ OSFreeMem(psDevConfig);
+ return eError;
+}
+
+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__)
+/* Verbose tracing toggle for the suspend/resume copy code.
+ * Uncomment the PVR_LOG form to enable. */
+/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */
+#define _DBG(...)
+
+/* System pre-power callback: on a suspend-to-RAM transition, copy the
+ * used contents of the GPU_LOCAL LMA heap into a host-memory staging
+ * buffer (psSysData->pvS3Buffer) so PostPower can restore it on resume.
+ * All other transitions are a no-op returning PVRSRV_OK. */
+static PVRSRV_ERROR PrePower(IMG_HANDLE hSysData,
+ PVRSRV_SYS_POWER_STATE eNewPowerState,
+ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+ PVRSRV_POWER_FLAGS ePwrFlags)
+{
+ SYS_DATA *psSysData = (SYS_DATA *) hSysData;
+ IMG_DEV_PHYADDR sDevPAddr = {0};
+ IMG_UINT64 uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize;
+ IMG_UINT64 uiSize = 0, uiOffset = 0;
+ PVRSRV_ERROR eError;
+
+ _DBG("(%s()) state: current=%s, new=%s; flags: 0x%08x", __func__,
+ PVRSRVSysPowerStateToString(eCurrentPowerState),
+ PVRSRVSysPowerStateToString(eNewPowerState), ePwrFlags);
+
+ /* The transition might be both from ON or OFF states to OFF state so check
+ * only for the *new* state. Also this is only valid for suspend requests. */
+ if (eNewPowerState != PVRSRV_SYS_POWER_STATE_OFF ||
+ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND_REQ))
+ {
+ return PVRSRV_OK;
+ }
+
+ eError = LMA_HeapIteratorCreate(psSysData->psDevConfig->psDevNode,
+ PVRSRV_PHYS_HEAP_GPU_LOCAL,
+ &psSysData->psHeapIter);
+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorCreate", return_error);
+
+ eError = LMA_HeapIteratorGetHeapStats(psSysData->psHeapIter, &uiHeapTotalSize,
+ &uiHeapUsedSize);
+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorGetHeapStats",
+ return_error);
+ uiHeapFreeSize = uiHeapTotalSize - uiHeapUsedSize;
+
+ _DBG("(%s()) heap stats: total=0x%" IMG_UINT64_FMTSPECx ", "
+ "used=0x%" IMG_UINT64_FMTSPECx ", free=0x%" IMG_UINT64_FMTSPECx,
+ __func__, uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize);
+
+ /* Staging buffer only needs to hold the *used* portion of the heap. */
+ psSysData->pvS3Buffer = OSAllocMem(uiHeapUsedSize);
+ PVR_LOG_GOTO_IF_NOMEM(psSysData->pvS3Buffer, eError, destroy_iterator);
+
+ /* Walk every used segment, copy it out, then poison the original. */
+ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize))
+ {
+ void *pvCpuVAddr;
+ IMG_CPU_PHYADDR sCpuPAddr = {0};
+
+ /* Guard against the iterator reporting more than the stats said. */
+ if (uiOffset + uiSize > uiHeapUsedSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "uiOffset = %" IMG_UINT64_FMTSPECx ", "
+ "uiSize = %" IMG_UINT64_FMTSPECx, uiOffset, uiSize));
+
+ PVR_LOG_GOTO_WITH_ERROR("LMA_HeapIteratorNext", eError,
+ PVRSRV_ERROR_INVALID_OFFSET,
+ free_buffer);
+ }
+
+ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr,
+ &sDevPAddr);
+
+ /* NOTE(review): OSMapPhysToLin result is not checked for NULL
+ * before the memcpy below -- confirm it cannot fail here. */
+ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC);
+
+ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, "
+ "size=0x%05" IMG_UINT64_FMTSPECx, __func__,
+ (void *) sDevPAddr.uiAddr, (void *) sCpuPAddr.uiAddr,
+ pvCpuVAddr, uiSize);
+
+ /* copy memory */
+ memcpy((IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, pvCpuVAddr,
+ uiSize);
+ /* and now poison it */
+ memset(pvCpuVAddr, 0x9b, uiSize);
+
+ uiOffset += uiSize;
+
+ OSUnMapPhysToLin(pvCpuVAddr, uiSize);
+ }
+
+ return PVRSRV_OK;
+
+free_buffer:
+ OSFreeMem(psSysData->pvS3Buffer);
+ psSysData->pvS3Buffer = NULL;
+destroy_iterator:
+ LMA_HeapIteratorDestroy(psSysData->psHeapIter);
+ psSysData->psHeapIter = NULL;
+return_error:
+ return eError;
+}
+
+/* System post-power-state callback (SUPPORT_LMA_SUSPEND_TO_RAM builds).
+ * Restores the GPU_LOCAL heap contents saved into pvS3Buffer by the
+ * pre-power callback. The restore runs only when resuming
+ * (PVRSRV_POWER_FLAGS_RESUME_REQ) out of the OFF state and a saved buffer
+ * exists; every other transition is a no-op returning PVRSRV_OK. */
+static PVRSRV_ERROR PostPower(IMG_HANDLE hSysData,
+ PVRSRV_SYS_POWER_STATE eNewPowerState,
+ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+ PVRSRV_POWER_FLAGS ePwrFlags)
+{
+ SYS_DATA *psSysData = (SYS_DATA *) hSysData;
+ IMG_DEV_PHYADDR sDevPAddr = {0};
+ IMG_UINT64 uiSize = 0, uiOffset = 0;
+ PVRSRV_ERROR eError;
+
+ _DBG("(%s()) state: current=%s, new=%s; flags=0x%08x; buffer=%px", __func__,
+ PVRSRVSysPowerStateToString(eCurrentPowerState),
+ PVRSRVSysPowerStateToString(eNewPowerState), ePwrFlags,
+ psSysData->pvS3Buffer);
+
+ /* The transition might be both to ON or OFF states from OFF state so check
+ * only for the *current* state. Also this is only valid for resume requests. */
+ if (eCurrentPowerState != PVRSRV_SYS_POWER_STATE_OFF ||
+ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_RESUME_REQ) ||
+ psSysData->pvS3Buffer == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Rewind the iterator so spans are visited in the same order used when
+ * the heap contents were captured, keeping uiOffset consistent. */
+ eError = LMA_HeapIteratorReset(psSysData->psHeapIter);
+ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorReset", free_buffer);
+
+ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize))
+ {
+ void *pvCpuVAddr;
+ IMG_CPU_PHYADDR sCpuPAddr = {0};
+
+ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr,
+ &sDevPAddr);
+
+ /* Temporarily map each physical span write-combined so the CPU can
+ * copy the saved image back into local memory. */
+ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC);
+
+ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, "
+ "size=0x%05" IMG_UINT64_FMTSPECx, __func__,
+ (void *) sDevPAddr.uiAddr, (void *) sCpuPAddr.uiAddr,
+ pvCpuVAddr, uiSize);
+
+ /* copy memory */
+ memcpy(pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset,
+ uiSize);
+
+ uiOffset += uiSize;
+
+ OSUnMapPhysToLin(pvCpuVAddr, uiSize);
+ }
+
+ /* Restore complete: drop the iterator and the system-memory copy. */
+ LMA_HeapIteratorDestroy(psSysData->psHeapIter);
+ psSysData->psHeapIter = NULL;
+
+ OSFreeMem(psSysData->pvS3Buffer);
+ psSysData->pvS3Buffer = NULL;
+
+ return PVRSRV_OK;
+
+free_buffer:
+ /* NOTE(review): this path frees the buffer but leaves psHeapIter alive;
+ * confirm whether the iterator should also be destroyed here, as the
+ * next suspend creates a fresh one. */
+ OSFreeMem(psSysData->pvS3Buffer);
+ psSysData->pvS3Buffer = NULL;
+
+ return eError;
+}
+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */
+
+/* Creates the PVRSRV_DEVICE_CONFIG for the tc (test chip) platform device:
+ * sets the DMA mask according to the memory mode, enables the parent tc
+ * device, locates and reserves the "rogue-regs" MMIO region, then builds
+ * the device config and hooks the error-notify (and, in LMA suspend-to-RAM
+ * builds, the pre/post power-state) callbacks.
+ * On failure every acquired resource is released in reverse order. */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ SYS_DATA *psSysData;
+ resource_size_t uiRegistersSize;
+ PVRSRV_ERROR eError;
+ int err = 0;
+
+ PVR_ASSERT(pvOSDevice);
+
+ psSysData = OSAllocZMem(sizeof(*psSysData));
+ if (psSysData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psSysData->pdev = to_platform_device((struct device *)pvOSDevice);
+ psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+ /*
+ * The device cannot address system memory, so there is no DMA
+ * limitation.
+ */
+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL)
+ {
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(64));
+ }
+ else
+ {
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(32));
+ }
+
+ /* Power up / enable the parent tc PCI device before touching registers. */
+ err = tc_enable(psSysData->pdev->dev.parent);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+ goto ErrFreeSysData;
+ }
+
+ psSysData->registers = platform_get_resource_byname(psSysData->pdev,
+ IORESOURCE_MEM,
+ "rogue-regs");
+ if (!psSysData->registers)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get Rogue register information",
+ __func__));
+ eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+ goto ErrorDevDisable;
+ }
+
+ /* Check the address range is large enough. */
+ uiRegistersSize = resource_size(psSysData->registers);
+ if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
+ __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+
+ eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+ goto ErrorDevDisable;
+ }
+
+ /* Reserve the address range */
+ if (!request_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers),
+ SYS_RGX_DEV_NAME))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Rogue register memory region not available",
+ __func__));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+
+ goto ErrorDevDisable;
+ }
+
+ eError = DeviceConfigCreate(psSysData, &psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorReleaseMemRegion;
+ }
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ eError = IonInit(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__));
+ goto ErrorDeviceConfigDestroy;
+ }
+#endif
+
+ /* Set psDevConfig->pfnSysDevErrorNotify callback */
+ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
+
+#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__)
+ /* power functions */
+ psDevConfig->pfnPrePowerState = PrePower;
+ psDevConfig->pfnPostPowerState = PostPower;
+
+ /* PrePower/PostPower need the device config to walk the local heap. */
+ psSysData->psDevConfig = psDevConfig;
+#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */
+
+ *ppsDevConfig = psDevConfig;
+
+ return PVRSRV_OK;
+
+/* Error unwind: release resources in the reverse order of acquisition. */
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ErrorDeviceConfigDestroy:
+ DeviceConfigDestroy(psDevConfig);
+#endif
+ErrorReleaseMemRegion:
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ErrorDevDisable:
+ tc_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+ OSFreeMem(psSysData);
+ return eError;
+}
+
+/* Tears down everything SysDevInit created, in reverse order: ION (when
+ * built in), the device config, the reserved register region, the parent
+ * tc device, and finally the SYS_DATA allocation itself. */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ IonDeinit();
+#endif
+
+ DeviceConfigDestroy(psDevConfig);
+
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ tc_disable(psSysData->pdev->dev.parent);
+
+ OSFreeMem(psSysData);
+}
+
+/* Dumps system-level debug state (chip temperature and PLL status read via
+ * tc_sys_info()) through the supplied debug printf callback. On TCF5 test
+ * chips no system information is available, so the function is a no-op.
+ * Always returns PVRSRV_OK; a tc_sys_info() failure just skips the dump. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+#if defined(TC_APOLLO_TCF5)
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ /* Fix: pvDumpDebugFile was not marked unreferenced, causing an
+ * unused-parameter warning in TCF5 builds. */
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+#else
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ u32 tmp = 0;
+ u32 pll;
+
+ PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------");
+
+ if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll))
+ goto err_out;
+
+ if (tmp > 0)
+ /* Fix: tmp is u32, so use the unsigned conversion specifier. */
+ PVR_DUMPDEBUG_LOG("Chip temperature: %u degrees C", tmp);
+ PVR_DUMPDEBUG_LOG("PLL status: %x", pll);
+
+err_out:
+ return eError;
+#endif
+}
+
+/* Per-installation context for a device LISR registered with the tc module;
+ * allocated by SysInstallDeviceLISR and freed by SysUninstallDeviceLISR. */
+typedef struct
+{
+ struct device *psDev; /* parent tc device owning the interrupt */
+ int iInterruptID; /* tc interrupt ID (TC_INTERRUPT_EXT) */
+ void *pvData; /* opaque data handed back to pfnLISR */
+ PFN_LISR pfnLISR; /* the services LISR to invoke */
+} LISR_DATA;
+
+/* Trampoline: the tc module delivers interrupts with a single void* context,
+ * so unpack the LISR_DATA and forward to the registered services LISR. */
+static void TCInterruptHandler(void* pvData)
+{
+ LISR_DATA *psLISRData = pvData;
+ psLISRData->pfnLISR(psLISRData->pvData);
+}
+
+/* Installs a services LISR for the Rogue interrupt via the tc module.
+ * Only TC_INTERRUPT_EXT is supported on this system; other IRQ IDs are
+ * rejected. On success *phLISRData receives a handle to later pass to
+ * SysUninstallDeviceLISR(). pszName is unused here. */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+ LISR_DATA *psLISRData;
+ PVRSRV_ERROR eError;
+ int err;
+
+ if (ui32IRQ != TC_INTERRUPT_EXT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ }
+
+ psLISRData = OSAllocZMem(sizeof(*psLISRData));
+ if (!psLISRData)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ psLISRData->pfnLISR = pfnLISR;
+ psLISRData->pvData = pvData;
+ psLISRData->iInterruptID = ui32IRQ;
+ psLISRData->psDev = psSysData->pdev->dev.parent;
+
+ /* Register the trampoline first, then unmask the interrupt, so no
+ * interrupt can be delivered before the handler is in place. */
+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_free_data;
+ }
+
+ err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_unset_interrupt_handler;
+ }
+
+ *phLISRData = psLISRData;
+ eError = PVRSRV_OK;
+
+ PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " with tc module to ID %u",
+ pfnLISR, ui32IRQ));
+
+err_out:
+ return eError;
+err_unset_interrupt_handler:
+ /* Unwind: deregister the trampoline (NULL handler) and free the context. */
+ tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+ OSFreeMem(psLISRData);
+ goto err_out;
+}
+
+/* Uninstalls a LISR previously installed by SysInstallDeviceLISR: masks the
+ * interrupt, deregisters the handler and frees the context. Failures from
+ * the tc calls are only logged; the function always returns PVRSRV_OK. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+ int err;
+
+ err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err));
+ }
+
+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+ }
+
+ PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " with tc module from ID %u",
+ psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+ OSFreeMem(psLISRData);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*! System specific poll/timeout details */
+#if defined(VIRTUAL_PLATFORM) || defined(FPGA)
+#define MAX_HW_TIME_US (240000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#define SYS_RGX_DEV_NAME "tc_rogue"
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += services/system/rogue/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/sysconfig_cmn.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/rogue/common/vmm_type_$(VMM_TYPE).o
+
+ifeq ($(SUPPORT_ION),1)
+$(PVRSRVKM_NAME)-y += services/system/common/env/linux/ion_support_generic.o
+endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "vz_vmm_pvz.h"
+#include "allocmem.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#include "pvr_debug.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#if defined(__linux__)
+#include <linux/dma-mapping.h>
+#endif
+#include "rgx_bvnc_defs_km.h"
+/*
+ * In systems that support trusted device address protection, there are three
+ * physical heaps from which pages should be allocated:
+ * - one heap for normal allocations
+ * - one heap for allocations holding META code memory
+ * - one heap for allocations holding secured DRM data
+ */
+
+#define PHYS_HEAP_IDX_GENERAL 0
+#define PHYS_HEAP_IDX_FW 1
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#define PHYS_HEAP_IDX_TDFWMEM 2
+#define PHYS_HEAP_IDX_TDSECUREBUF 3
+#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
+#define PHYS_HEAP_IDX_FW_MEMORY 2
+#endif
+
+/* Change to test CPU_LOCAL sys layers*/
+#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_GPU_LOCAL
+//#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_CPU_LOCAL
+
+#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_GPU_LOCAL
+//#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_CPU_LOCAL
+
+/*
+ CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* UMA system: CPU and device share one physical address space, so the
+ * translation is a straight 1:1 copy of each address. */
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/*
+ Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Inverse of UMAPhysHeapCpuPAddrToDevPAddr: identity mapping on a UMA
+ * system, copied element by element. */
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/* Address-translation callbacks shared by every physical heap configured
+ * by PhysHeapsCreate(); both directions are identity mappings (UMA). */
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs =
+{
+ /* pfnCpuPAddrToDevPAddr */
+ UMAPhysHeapCpuPAddrToDevPAddr,
+ /* pfnDevPAddrToCpuPAddr */
+ UMAPhysHeapDevPAddrToCpuPAddr,
+};
+
+/* Allocates and populates the PHYS_HEAP_CONFIG array for this system: a
+ * general UMA heap plus a firmware heap, extended with two trusted-device
+ * heaps (SUPPORT_TRUSTED_DEVICE) or one dedicated firmware-memory heap
+ * (SUPPORT_DEDICATED_FW_MEMORY) when those build options are set. The
+ * caller owns the returned array and frees it with PhysHeapsDestroy(). */
+static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+ IMG_UINT32 *puiPhysHeapCountOut)
+{
+ /*
+ * This function is called during device initialisation, which on Linux,
+ * means it won't be called concurrently. As such, there's no need to
+ * protect it with a lock or use an atomic variable.
+ */
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiHeapCount = 2; /* general + firmware heaps, always present */
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ uiHeapCount += 2;
+#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
+ uiHeapCount += 1;
+#endif
+
+ pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount);
+ if (!pasPhysHeaps)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "SYSMEM";
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32UsageFlags = UMA_HEAP_USAGE_FLAG;
+
+ pasPhysHeaps[PHYS_HEAP_IDX_FW].pszPDumpMemspaceName = "SYSMEM_FW";
+ pasPhysHeaps[PHYS_HEAP_IDX_FW].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_FW].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_FW].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Separate heaps for firmware code/private data and secure buffers. */
+ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].pszPDumpMemspaceName = "TDFWMEM";
+ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].ui32UsageFlags =
+ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+
+ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].pszPDumpMemspaceName = "TDSECBUFMEM";
+ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].ui32UsageFlags =
+ PHYS_HEAP_USAGE_GPU_SECURE;
+
+#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
+ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].pszPDumpMemspaceName = "DEDICATEDFWMEM";
+ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].ui32UsageFlags =
+ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+#endif
+
+ *ppasPhysHeapsOut = pasPhysHeaps;
+ *puiPhysHeapCountOut = uiHeapCount;
+
+ return PVRSRV_OK;
+}
+
+/* Releases the heap-config array allocated by PhysHeapsCreate(). */
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+ OSFreeMem(pasPhysHeaps);
+}
+
+/* Feature-dependent device-config fix-up: selects CPU-only cache snooping
+ * when the core reports AXI ACE-Lite (and SUPPORT_AXI_ACE_TEST is built),
+ * otherwise disables snooping entirely. */
+static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features)
+{
+#if defined(SUPPORT_AXI_ACE_TEST)
+ if ( ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+ {
+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
+ }else
+#endif
+ {
+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+ }
+}
+
+/* Builds the device config for the "nohw" (no hardware) system layer.
+ * A single allocation carries the device config plus the RGX data and
+ * timing info it points at; register base and IRQ are placeholder values
+ * since there is no real device, and power management is disabled. */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiPhysHeapCount;
+ PVRSRV_ERROR eError;
+
+#if defined(__linux__)
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
+#endif
+
+ /* One allocation for the config and the two RGX structures it owns. */
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Carve the RGX data and timing info out of the same allocation. */
+ psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+ psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+
+ eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount);
+ if (eError)
+ {
+ goto ErrorFreeDevConfig;
+ }
+
+ /* Setup RGX specific timing data */
+ psRGXTimingInfo->ui32CoreClockSpeed = RGX_NOHW_CORE_CLOCK_SPEED;
+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Setup the device config */
+ psDevConfig->pvOSDevice = pvOSDevice;
+ psDevConfig->pszName = "nohw";
+ psDevConfig->pszVersion = NULL;
+ psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit;
+
+ /* Device setup information: placeholder register base and IRQ, as no
+ * real hardware is present on this system. */
+ psDevConfig->sRegsCpuPBase.uiAddr = 0x00f00000;
+ psDevConfig->ui32IRQ = 0x00000bad;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
+ psDevConfig->ui32RegsSize = RGX_VIRTUALISATION_REG_SIZE_PER_OS * GPUVIRT_VALIDATION_NUM_OS;
+#else
+ psDevConfig->ui32RegsSize = 0x4000;
+#endif
+
+ psDevConfig->pasPhysHeaps = pasPhysHeaps;
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+ psDevConfig->eDefaultHeap = UMA_DEFAULT_HEAP;
+
+ /* No power management on no HW system */
+ psDevConfig->pfnPrePowerState = NULL;
+ psDevConfig->pfnPostPowerState = NULL;
+
+ psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
+
+ /* No clock frequency either */
+ psDevConfig->pfnClockFreqGet = NULL;
+
+ psDevConfig->hDevData = psRGXData;
+
+ /* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+ IonInit(NULL);
+#endif
+
+ /* Set psDevConfig->pfnSysDevErrorNotify callback */
+ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
+
+ *ppsDevConfig = psDevConfig;
+
+ return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+ OSFreeMem(psDevConfig);
+ return eError;
+}
+
+/* Tears down the nohw device config: deinitialises ION when built in, then
+ * frees the heap-config array and the single config allocation (which also
+ * holds the RGX data and timing info). */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_ION)
+ IonDeinit();
+#endif
+
+ PhysHeapsDestroy(psDevConfig->pasPhysHeaps);
+ OSFreeMem(psDevConfig);
+}
+
+/* No-op stub: the nohw system has no interrupt source, so there is nothing
+ * to install. Always succeeds; *phLISRData is left untouched. */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+ PVR_UNREFERENCED_PARAMETER(ui32IRQ);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(pfnLISR);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ PVR_UNREFERENCED_PARAMETER(phLISRData);
+
+ return PVRSRV_OK;
+}
+
+/* No-op stub matching the nohw SysInstallDeviceLISR; always succeeds. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ PVR_UNREFERENCED_PARAMETER(hLISRData);
+
+ return PVRSRV_OK;
+}
+
+/* No-op stub: there is no system-level debug state to dump without
+ * hardware; always returns PVRSRV_OK. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(SYSCCONFIG_H)
+#define SYSCCONFIG_H
+
+
+#define RGX_NOHW_CORE_CLOCK_SPEED 100000000
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (0)
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* SYSCCONFIG_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSINFO_H)
+#define SYSINFO_H
+
+/*! System specific poll/timeout details */
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME "rgxnohw"
+#endif
+
+#endif /* !defined(SYSINFO_H) */
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
-#include <soc/starfive/jh71xx_pmu.h>
+#include <soc/starfive/jh7110_pmu.h>
#endif
#include "pvr_debug.h"
#include "kernel_compatibility.h"
#include <linux/pm_runtime.h>
+#include <linux/of.h>
struct sf7110_cfg sf_cfg_t = {0,};
static void SetVoltage(IMG_UINT32 ui32Volt) {}
#endif
-#ifdef CONFIG_SIFIVE_FLUSH
-extern void sifive_flush64_range(unsigned long start, unsigned long len);
-#endif
+/* Resolved at link time from the SiFive L2 cache-controller driver. */
+extern void sifive_l2_flush64_range(unsigned long start, unsigned long len);
void do_sifive_l2_flush64_range(unsigned long start, unsigned long len)
{
-#ifdef CONFIG_SIFIVE_FLUSH
-	sifive_flush64_range(ALIGN_DOWN(start, 64), len + start % 64);
-#endif
+	/* Align start down to the 64-byte cache-line boundary and grow len by
+	 * the same amount so the original [start, start+len) range is still
+	 * fully covered at the front.
+	 * NOTE(review): the tail is not rounded up to a line boundary here —
+	 * presumably the flush primitive operates on whole lines; confirm. */
+	sifive_l2_flush64_range(ALIGN_DOWN(start, 64), len + start % 64);
}
void do_invalid_range(unsigned long start, unsigned long len)
return;
}
len = (unsigned long)(sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
- //printk("FF cop:%d, s:%llx, len:%lx\n", eRequestType, sCPUPhysStart.uiAddr, len);
- //if(len < 64)
- // dump_stack();
+
switch (eRequestType)
{
case PVRSRV_CACHE_OP_INVALIDATE:
- //do_invalid_range(sCPUPhysStart.uiAddr, len);
+ do_sifive_l2_flush64_range(sCPUPhysStart.uiAddr, len);
break;
case PVRSRV_CACHE_OP_CLEAN:
case PVRSRV_CACHE_OP_FLUSH:
static IMG_UINT32 sys_gpu_runtime_resume(IMG_HANDLE hd)
{
- jh71xx_pmu_hw_event_turn_off_mask(0);
+ starfive_pmu_hw_event_turn_off_mask(0);
clk_prepare_enable(sf_cfg_t.clk_axi);
u0_img_gpu_enable();
+/* Runtime-suspend hook installed via psf->runtime_suspend: disables the
+ * GPU block and masks all PMU hardware events. hd is unused. */
static IMG_UINT32 sys_gpu_runtime_suspend(IMG_HANDLE hd)
{
	u0_img_gpu_disable();
-	clk_disable_unprepare(sf_cfg_t.clk_axi);
+	/* NOTE(review): clk_axi is still enabled in sys_gpu_runtime_resume()
+	 * but its clk_disable_unprepare() counterpart is dropped here — verify
+	 * the prepare/enable refcount stays balanced across suspend cycles. */
+	starfive_pmu_hw_event_turn_off_mask((uint32_t)-1);
	return 0;
}
return -ENOMEM;
psf->gpu_reg_start = STARFIVE_7110_GPU_PBASE;
psf->gpu_reg_size = STARFIVE_7110_GPU_SIZE;
+ psf->rate = RGX_STARFIVE_7100_CORE_CLOCK_SPEED;
psf->clk_apb = devm_clk_get_optional(dev, "clk_apb");
if (IS_ERR(psf->clk_apb)) {
goto err_gpu_unmap;
}
+ if (of_find_node_by_path("/opp-table-0/opp-1250000000")) {
+ psf->rate = RGX_STARFIVE_7100_CORE_CLOCK_SPEED_BIN2;
+ }
+
psf->runtime_resume = sys_gpu_runtime_resume;
psf->runtime_suspend = sys_gpu_runtime_suspend;
{
clk_prepare_enable(sf_cfg_t.clk_apb);
clk_prepare_enable(sf_cfg_t.clk_rtc);
- clk_set_rate(sf_cfg_t.clk_div, RGX_STARFIVE_7100_CORE_CLOCK_SPEED);
+ clk_set_rate(sf_cfg_t.clk_div, sf_cfg_t.rate);
clk_prepare_enable(sf_cfg_t.clk_core);
clk_prepare_enable(sf_cfg_t.clk_sys);
/*
* Setup RGX specific timing data
*/
- gsRGXTimingInfo.ui32CoreClockSpeed = RGX_STARFIVE_7100_CORE_CLOCK_SPEED;
gsRGXTimingInfo.bEnableActivePM = IMG_TRUE;
gsRGXTimingInfo.bEnableRDPowIsland = IMG_TRUE;
gsRGXTimingInfo.ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
return PVRSRV_ERROR_BAD_MAPPING;
}
gsDevices[0].hSysData = &sf_cfg_t;
+ gsRGXTimingInfo.ui32CoreClockSpeed = sf_cfg_t.rate;
pm_runtime_enable(sf_cfg_t.dev);
/* power management on HW system */
struct device *dev;
SYS_DEV_CLK_GET runtime_resume;
SYS_DEV_CLK_GET runtime_suspend;
+ unsigned long rate;
};
#define mk_crg_offset(x) ((x) - (U0_SYS_CRG__SAIF_BD_APBS__BASE_ADDR))
#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (80000)
#else
#define RGX_STARFIVE_7100_CORE_CLOCK_SPEED (594.0 * 1000 * 1000)//maybe 400M?
+#define RGX_STARFIVE_7100_CORE_CLOCK_SPEED_BIN2 (396.0 * 1000 * 1000)
#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (100)
#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File dma_support.c
+@Title System DMA support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides a contiguous memory allocator (i.e. DMA allocator);
+ APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(__linux__)
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#endif
+
+#include "allocmem.h"
+#include "dma_support.h"
+
+#define DMA_MAX_IOREMAP_ENTRIES 8
+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
+static IMG_UINT32 gsDmaIoRemapRef[DMA_MAX_IOREMAP_ENTRIES] = {0};
+
+/*!
+******************************************************************************
+ @Function SysDmaAllocMem
+
+ @Description Allocates physically contiguous, DMA-coherent memory.
+ The caller must fill in pvOSDevice and ui64Size beforehand;
+ on success pvVirtAddr receives the kernel VA and
+ sBusAddr.uiAddr the bus/DMA address.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* NOTE(review): a NULL psDmaAlloc or NULL pvOSDevice is skipped
+	 * silently and PVRSRV_OK is still returned to the caller. */
+	if (psDmaAlloc != NULL && psDmaAlloc->pvOSDevice != NULL)
+	{
+#if defined(__linux__)
+		psDmaAlloc->pvVirtAddr =
+			dma_alloc_coherent((struct device *)psDmaAlloc->pvOSDevice,
+							   (size_t) psDmaAlloc->ui64Size,
+							   (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr,
+							   GFP_KERNEL);
+		/* On failure this macro logs and returns the error immediately. */
+		PVR_LOG_RETURN_IF_FALSE((NULL != psDmaAlloc->pvVirtAddr), "dma_alloc_coherent() failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES);
+#else
+	#error "Provide OS implementation of DMA allocation";
+#endif
+	}
+
+	return eError;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaFreeMem
+
+ @Description Frees physically contiguous memory previously obtained via
+ SysDmaAllocMem(). A NULL pointer or an unallocated entry
+ (pvVirtAddr == NULL) is a harmless no-op.
+
+ @Return void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc)
+{
+	if (psDmaAlloc && psDmaAlloc->pvVirtAddr)
+	{
+#if defined(__linux__)
+		dma_free_coherent((struct device *)psDmaAlloc->pvOSDevice,
+						  (size_t) psDmaAlloc->ui64Size,
+						  psDmaAlloc->pvVirtAddr,
+						  (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr);
+		/* NOTE(review): pvVirtAddr is left dangling after the free —
+		 * callers must not reuse this DMA_ALLOC without re-allocating. */
+#else
+	#error "Provide OS implementation of DMA deallocation";
+#endif
+	}
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaRegisterForIoRemapping
+
+ @Description Registers a DMA_ALLOC in the I/O remap table. Two modes:
+ with pvVirtAddr set, a free table slot is claimed; with
+ pvVirtAddr NULL, the VA is resolved from an existing entry
+ whose bus-address span contains the request, and that
+ entry's reference count is raised.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	IMG_UINT32 ui32Idx;
+	PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+	else if (psDmaAlloc->pvVirtAddr == NULL)
+	{
+		/* Check if an I/O remap entry already exists for this request,
+		 * i.e. one whose [bus, bus+size) span fully contains it */
+		for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+		{
+			if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr &&
+				gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr &&
+				gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size)
+			{
+				PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr);
+				break;
+			}
+		}
+
+		if (ui32Idx < DMA_MAX_IOREMAP_ENTRIES)
+		{
+			/* Derive the caller's VA from the entry's VA at the same
+			 * bus-address offset, and take a reference on the entry */
+			IMG_UINT64 ui64Offset;
+			ui64Offset = psDmaAlloc->sBusAddr.uiAddr - gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr;
+			psDmaAlloc->pvVirtAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr + (uintptr_t)ui64Offset;
+			gsDmaIoRemapRef[ui32Idx] += 1;
+			return PVRSRV_OK;
+		}
+
+		return PVRSRV_ERROR_INVALID_PARAMS;
+	}
+
+	/* Check if there is a free I/O remap table entry for this request */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0)
+		{
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL);
+			PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0);
+			break;
+		}
+	}
+
+	if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES)
+	{
+		/* Table is full: PVRSRV_ERROR_TOO_FEW_BUFFERS */
+		return eError;
+	}
+
+	gsDmaIoRemapArray[ui32Idx].ui64Size = psDmaAlloc->ui64Size;
+	gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr;
+	gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr;
+	gsDmaIoRemapRef[ui32Idx] += 1;
+
+	PVR_LOG(("DMA: register I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+			psDmaAlloc->pvVirtAddr,
+			psDmaAlloc->sBusAddr.uiAddr,
+			psDmaAlloc->ui64Size));
+
+	/* From now on SysDmaDevPAddrToCpuVAddr()/SysDmaCpuVAddrToDevPAddr()
+	 * will consult the table */
+	gbEnableDmaIoRemapping = IMG_TRUE;
+
+	return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaDeregisterForIoRemapping
+
+ @Description Drops one reference on the I/O remap entry covering the
+ given DMA_ALLOC; the entry is cleared when the last
+ reference goes away. Invalid/incomplete arguments are
+ ignored silently.
+
+ @Return void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+	IMG_UINT32 ui32Idx;
+
+	if (psDmaAlloc == NULL ||
+		psDmaAlloc->ui64Size == 0 ||
+		psDmaAlloc->pvVirtAddr == NULL ||
+		psDmaAlloc->sBusAddr.uiAddr == 0)
+	{
+		return;
+	}
+
+	/* Remove specified entry from the list of I/O remap entries;
+	 * matching is by bus-address containment, as in registration */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr &&
+			gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr <= psDmaAlloc->sBusAddr.uiAddr &&
+			gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr + gsDmaIoRemapArray[ui32Idx].ui64Size >= psDmaAlloc->sBusAddr.uiAddr + psDmaAlloc->ui64Size)
+		{
+			/* Only the last reference actually releases the slot */
+			if (! --gsDmaIoRemapRef[ui32Idx])
+			{
+				PVR_LOG(("DMA: deregister I/O remap: VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+						gsDmaIoRemapArray[ui32Idx].pvVirtAddr,
+						gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr,
+						gsDmaIoRemapArray[ui32Idx].ui64Size));
+
+				gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0;
+				gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL;
+				gsDmaIoRemapArray[ui32Idx].ui64Size = 0;
+			}
+
+			break;
+		}
+	}
+
+	/* Check if no other I/O remap entries exists for remapping */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL)
+		{
+			break;
+		}
+	}
+
+	if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES)
+	{
+		/* No entries found so disable remapping */
+		gbEnableDmaIoRemapping = IMG_FALSE;
+	}
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaDevPAddrToCpuVAddr
+
+ @Description Maps a DMA_ALLOC physical address to CPU virtual address
+ using the registered I/O remap table. Returns NULL when
+ remapping is disabled or no entry covers uiAddr.
+
+ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size)
+{
+	IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	/* Fast exit while no entry has ever been registered */
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return pvDMAVirtAddr;
+	}
+
+	/* Linear search for the entry whose bus-address span contains uiAddr */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				/* NOTE(review): only the start address is range-checked at
+				 * runtime; this end-of-span check is assert-only and may be
+				 * compiled out (PVRSRV_NEED_PVR_ASSERT is disabled in this
+				 * build config). */
+				PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan);
+				pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: PA: 0x%llx => VA: 0x%p",
+					uiAddr, pvDMAVirtAddr));
+
+				break;
+			}
+		}
+	}
+
+	return pvDMAVirtAddr;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaCpuVAddrToDevPAddr
+
+ @Description Maps a DMA_ALLOC CPU virtual address to physical address
+ via the registered I/O remap table (inverse of
+ SysDmaDevPAddrToCpuVAddr). Returns 0 when remapping is
+ disabled or no entry covers the VA.
+
+ @Return Non-zero value on success. Otherwise, a 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr)
+{
+	IMG_UINT64 uiAddr = 0;
+	DMA_ALLOC *psHeapDmaAlloc;
+	IMG_UINT32 ui32Idx;
+
+	/* Fast exit while no entry has ever been registered */
+	if (gbEnableDmaIoRemapping == IMG_FALSE)
+	{
+		return uiAddr;
+	}
+
+	/* Linear search for the entry whose VA span contains pvDMAVirtAddr */
+	for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+	{
+		psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+		if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr)
+		{
+			IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+			IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr;
+
+			if (uiOffset < uiSpan)
+			{
+				uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset;
+
+				PVR_DPF((PVR_DBG_MESSAGE,
+					"DMA: remap: VA: 0x%p => PA: 0x%llx",
+					pvDMAVirtAddr, uiAddr));
+
+				break;
+			}
+		}
+	}
+
+	return uiAddr;
+}
+
+/******************************************************************************
+ End of file (dma_support.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File vmm_type_stub.c
+@Title Stub VM manager type
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Sample stub (no-operation) VM manager implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxheapconfig.h"
+
+#include "vmm_impl.h"
+#include "vmm_pvz_server.h"
+
+/* Stub pfnMapDevPhysHeap client entry: this build has no VM manager, so
+ * mapping a device physical heap is unsupported by design. */
+static PVRSRV_ERROR
+StubVMMMapDevPhysHeap(IMG_UINT64 ui64Size,
+                      IMG_UINT64 ui64Addr)
+{
+	PVR_UNREFERENCED_PARAMETER(ui64Size);
+	PVR_UNREFERENCED_PARAMETER(ui64Addr);
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+/* Stub pfnUnmapDevPhysHeap client entry: nothing is ever mapped by the
+ * stub, so there is nothing to undo. */
+static PVRSRV_ERROR
+StubVMMUnmapDevPhysHeap(void)
+{
+	return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+/* Single shared stub connection table handed out by
+ * VMMCreatePvzConnection(). Client entries point at the local no-op
+ * stubs above; server and vmm entries forward to the PvzServer*
+ * handlers. */
+static VMM_PVZ_CONNECTION gsStubVmmPvz =
+{
+	.sClientFuncTab = {
+		/* pfnMapDevPhysHeap */
+		&StubVMMMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&StubVMMUnmapDevPhysHeap
+	},
+
+	.sServerFuncTab = {
+		/* pfnMapDevPhysHeap */
+		&PvzServerMapDevPhysHeap,
+
+		/* pfnUnmapDevPhysHeap */
+		&PvzServerUnmapDevPhysHeap
+	},
+
+	.sVmmFuncTab = {
+		/* pfnOnVmOnline */
+		&PvzServerOnVmOnline,
+
+		/* pfnOnVmOffline */
+		&PvzServerOnVmOffline,
+
+		/* pfnVMMConfigure */
+		&PvzServerVMMConfigure
+	}
+};
+
+/* Hands out the shared, statically allocated stub connection table.
+ * No memory is allocated, so the matching destroy has nothing to free. */
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
+{
+	PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
+	*psPvzConnection = &gsStubVmmPvz;
+	/* Logged at error level so a stub-configured build stands out. */
+	PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support"));
+	return PVRSRV_OK;
+}
+
+/* Counterpart of VMMCreatePvzConnection(): the connection is static, so
+ * this only sanity-checks the pointer and releases nothing. */
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
+{
+	PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
+}
+
+/******************************************************************************
+ End of file (vmm_type_stub.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File dma_support.h
+@Title Device contiguous memory allocator and I/O re-mapper
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides a contiguous memory allocator API; mainly
+ used for allocating / ioremapping (DMA/PA <-> CPU/VA)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DMA_SUPPORT_H
+#define DMA_SUPPORT_H
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+typedef struct _DMA_ALLOC_
+{
+ IMG_UINT64 ui64Size;
+ IMG_CPU_VIRTADDR pvVirtAddr;
+ IMG_DEV_PHYADDR sBusAddr;
+ void *pvOSDevice;
+} DMA_ALLOC;
+
+/*!
+*******************************************************************************
+ @Function SysDmaAllocMem
+ @Description Allocates physically contiguous memory
+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function SysDmaFreeMem
+ @Description Free physically contiguous memory
+ @Return void
+******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function SysDmaRegisterForIoRemapping
+ @Description Registers DMA_ALLOC for manual I/O remapping
+ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function SysDmaDeregisterForIoRemapping
+ @Description Deregisters DMA_ALLOC from manual I/O remapping
+ @Return void
+******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+*******************************************************************************
+ @Function SysDmaDevPAddrToCpuVAddr
+ @Description Maps a DMA_ALLOC physical address to CPU virtual address
+ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL
+******************************************************************************/
+IMG_CPU_VIRTADDR
+SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size);
+
+/*!
+*******************************************************************************
+ @Function SysDmaCpuVAddrToDevPAddr
+ @Description Maps a DMA_ALLOC CPU virtual address to physical address
+ @Return Non-zero value on success. Otherwise, a 0
+******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr);
+
+#endif /* DMA_SUPPORT_H */
+
+/******************************************************************************
+ End of file (dma_support.h)
+******************************************************************************/
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += \
+ services/system/volcanic/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/sysconfig_cmn.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/volcanic/common/vmm_type_$(VMM_TYPE).o
+
+$(PVRSRVKM_NAME)-y += \
+ services/system/volcanic/$(PVR_SYSTEM)/fpga.o
+
+
+ccflags-y += \
+ -I$(TOP)/services/system/volcanic/common/env/linux \
+ -I$(TOP)/services/include/env/linux \
+ -I$(TOP)/kernel/drivers/staging/imgtec/tc \
+ -I$(TOP)/include/volcanic/system/rgx_tc -I$(TOP)/include/system/rgx_tc
--- /dev/null
+/*************************************************************************/ /*!
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+#include <linux/module.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pci_support.h"
+#include "oskm_apphint.h"
+#include "pdump_km.h"
+#include "rgxtbdefs_km.h"
+
+
+#include "fpga.h"
+
+/*!
+ ******************************************************************************
+ * System FPGA parameters
+ *****************************************************************************/
+static IMG_UINT32 gui32SysTBBWDropN = 0;
+static IMG_UINT32 gui32SysTBBWPeriod = 0;
+static IMG_UINT32 gui32SysTBQOS0Min = 0;
+static IMG_UINT32 gui32SysTBQOS0Max = 0;
+static IMG_UINT32 gui32SysTBQOS15Min = 0;
+static IMG_UINT32 gui32SysTBQOS15Max = 0;
+static IMG_UINT32 gui32SysTBQOSDist = 0;
+static IMG_UINT32 gui32SysTBMemArbiter = 0;
+static IMG_UINT32 gui32SysTBMaxIdOutRW = 0;
+static IMG_UINT32 gui32SysTBMaxIdOutWr = 0;
+static IMG_UINT32 gui32SysTBMaxIdOutRd = 0;
+/* these allow raw writes to RGX_TB_QOS_RD_LATENCY and RGX_TB_QOS_WR_LATENCY */
+static IMG_UINT64 gui64SysTBQOSLatencyRd = 0;
+static IMG_UINT64 gui64SysTBQOSLatencyWr = 0;
+
+#if defined(__linux__)
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+module_param_named(sys_tb_bandwidth_drop, gui32SysTBBWDropN, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_bandwidth_period, gui32SysTBBWPeriod, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos0_min, gui32SysTBQOS0Min, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos0_max, gui32SysTBQOS0Max, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos15_min, gui32SysTBQOS15Min, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos15_max, gui32SysTBQOS15Max, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos_dist, gui32SysTBQOSDist, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_mem_arbiter, gui32SysTBMemArbiter, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_max_id_outstanding_rdwr, gui32SysTBMaxIdOutRW, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_max_id_outstanding_wr, gui32SysTBMaxIdOutWr, uint, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_max_id_outstanding_rd, gui32SysTBMaxIdOutRd, uint, S_IRUGO | S_IWUSR);
+
+module_param_named(sys_tb_qos_latency_rd, gui64SysTBQOSLatencyRd, ullong, S_IRUGO | S_IWUSR);
+module_param_named(sys_tb_qos_latency_wr, gui64SysTBQOSLatencyWr, ullong, S_IRUGO | S_IWUSR);
+#endif
+
+
+/*
+ * work out the length in bits for a register field from its ~CLRMSK.
+ * Assumes the relevant bits are all contiguous.
+ *
+ * Implemented as a plain population count, so the result equals the
+ * field width only while that contiguity assumption holds.
+ */
+static IMG_UINT32 FieldLengthBits(IMG_UINT32 mask)
+{
+	IMG_UINT32 count = 0;
+
+	while (mask != 0)
+	{
+		count += (mask & 1);
+		mask >>= 1;
+	}
+	return count;
+}
+
+
+/* Builds the RGX_TB_BW_LIMITER register value from the
+ * sys_tb_bandwidth_drop/period module parameters. Returns 0 (limiter
+ * left disabled) while both parameters are at their zero defaults. */
+static IMG_UINT32 TBBandwidthLimiterGet(void)
+{
+	IMG_UINT32 ui32BandwidthLimiter = 0;
+
+	/* create bandwidth limiter reg value */
+	if (gui32SysTBBWDropN != 0 || gui32SysTBBWPeriod != 0)
+	{
+		IMG_UINT32 ui31DropN_ext;
+		IMG_UINT32 ui31Period_ext;
+
+		/* get EXT bits from the apphint values: bits that do not fit in
+		 * the base DROPN/PERIOD fields spill into the *_EXT fields */
+		ui31DropN_ext = gui32SysTBBWDropN >> FieldLengthBits(~RGX_TB_BW_LIMITER_DROPN_CLRMSK);
+		ui31Period_ext = gui32SysTBBWPeriod >> FieldLengthBits(~RGX_TB_BW_LIMITER_PERIOD_CLRMSK);
+
+		ui32BandwidthLimiter = (RGX_TB_BW_LIMITER_ENABLE_EN << RGX_TB_BW_LIMITER_ENABLE_SHIFT) |
+							   ((gui32SysTBBWDropN << RGX_TB_BW_LIMITER_DROPN_SHIFT) & ~RGX_TB_BW_LIMITER_DROPN_CLRMSK) |
+							   ((gui32SysTBBWPeriod << RGX_TB_BW_LIMITER_PERIOD_SHIFT) & ~RGX_TB_BW_LIMITER_PERIOD_CLRMSK) |
+							   ((ui31DropN_ext << RGX_TB_BW_LIMITER_DROPN_EXT_SHIFT) & ~RGX_TB_BW_LIMITER_DROPN_EXT_CLRMSK) |
+							   ((ui31Period_ext << RGX_TB_BW_LIMITER_PERIOD_EXT_SHIFT) & ~RGX_TB_BW_LIMITER_PERIOD_EXT_CLRMSK);
+	}
+
+	return ui32BandwidthLimiter;
+}
+
+
+/*
+ * These latencies can be specified in total using gui64SysTBQOSLatencyRd/Wr or in individual fields.
+ * If full register is specified then the individual fields are ignored.
+ * Individual fields only allow same values to be set for read and write.
+ */
+/* Build the RGX_TB_QOS_RD_LATENCY value: the whole-register parameter
+ * (sys_tb_qos_latency_rd) takes precedence over the individual
+ * min/max/dist fields. */
+static IMG_UINT64 TBQOSReadLatencyGet(void)
+{
+ if (gui64SysTBQOSLatencyRd != 0)
+ {
+ return gui64SysTBQOSLatencyRd;
+ }
+
+ return ((((IMG_UINT64)gui32SysTBQOS15Max) << RGX_TB_QOS_RD_LATENCY_MAX_15_SHIFT) & ~RGX_TB_QOS_RD_LATENCY_MAX_15_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS15Min) << RGX_TB_QOS_RD_LATENCY_MIN_15_SHIFT) & ~RGX_TB_QOS_RD_LATENCY_MIN_15_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS0Max) << RGX_TB_QOS_RD_LATENCY_MAX_0_SHIFT) & ~RGX_TB_QOS_RD_LATENCY_MAX_0_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS0Min) << RGX_TB_QOS_RD_LATENCY_MIN_0_SHIFT) & ~RGX_TB_QOS_RD_LATENCY_MIN_0_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOSDist) << RGX_TB_QOS_RD_LATENCY_DIST_SHIFT) & ~RGX_TB_QOS_RD_LATENCY_DIST_CLRMSK);
+}
+
+/* Build the RGX_TB_QOS_WR_LATENCY value: the whole-register parameter
+ * (sys_tb_qos_latency_wr) takes precedence over the individual
+ * min/max/dist fields (which are shared with the read path). */
+static IMG_UINT64 TBQOSWriteLatencyGet(void)
+{
+ if (gui64SysTBQOSLatencyWr != 0)
+ {
+ return gui64SysTBQOSLatencyWr;
+ }
+
+ return ((((IMG_UINT64)gui32SysTBQOS15Max) << RGX_TB_QOS_WR_LATENCY_MAX_15_SHIFT) & ~RGX_TB_QOS_WR_LATENCY_MAX_15_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS15Min) << RGX_TB_QOS_WR_LATENCY_MIN_15_SHIFT) & ~RGX_TB_QOS_WR_LATENCY_MIN_15_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS0Max) << RGX_TB_QOS_WR_LATENCY_MAX_0_SHIFT) & ~RGX_TB_QOS_WR_LATENCY_MAX_0_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOS0Min) << RGX_TB_QOS_WR_LATENCY_MIN_0_SHIFT) & ~RGX_TB_QOS_WR_LATENCY_MIN_0_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBQOSDist) << RGX_TB_QOS_WR_LATENCY_DIST_SHIFT) & ~RGX_TB_QOS_WR_LATENCY_DIST_CLRMSK);
+}
+
+/* Raw RGX_TB_MEM_ARBITER value from the sys_tb_mem_arbiter module
+ * parameter; 0 means "leave the register untouched". */
+static IMG_UINT32 TBMemArbiterGet(void)
+{
+ return gui32SysTBMemArbiter;
+}
+
+/* Combine the three sys_tb_max_id_outstanding_* module parameters into
+ * the RGX_TB_MAX_ID_OUTSTANDING register layout; 0 when all are unset. */
+static IMG_UINT64 TBQOSMaxIdOutstandingGet(void)
+{
+ return ((((IMG_UINT64)gui32SysTBMaxIdOutRW) << RGX_TB_MAX_ID_OUTSTANDING_RD_WR_SHIFT) & ~RGX_TB_MAX_ID_OUTSTANDING_RD_WR_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBMaxIdOutWr) << RGX_TB_MAX_ID_OUTSTANDING_WRITE_SHIFT) & ~RGX_TB_MAX_ID_OUTSTANDING_WRITE_CLRMSK) |
+ ((((IMG_UINT64)gui32SysTBMaxIdOutRd) << RGX_TB_MAX_ID_OUTSTANDING_READ_SHIFT) & ~RGX_TB_MAX_ID_OUTSTANDING_READ_CLRMSK);
+}
+
+
+/*
+ * FPGA_Reset
+ *
+ * Program the FPGA testbench wrapper registers (bandwidth limiter, read/write
+ * QoS latencies, memory arbiter, max outstanding IDs) from their module
+ * parameters. Each register is written only when its parameter set is
+ * non-zero; every write is followed by a read-back to post it, and the value
+ * is read again for logging.
+ *
+ * registers  - device register resource; the wrapper block lives at
+ *              FPGA_RGX_TB_REG_WRAPPER_OFFSET within it.
+ * bFullReset - currently unused; kept so the DevReset() interface is stable.
+ *
+ * Returns PVRSRV_OK, or PVRSRV_ERROR_BAD_MAPPING if the temporary register
+ * mapping cannot be created.
+ */
+PVRSRV_ERROR FPGA_Reset(struct resource *registers, IMG_BOOL bFullReset)
+{
+ IMG_CPU_PHYADDR sWrapperRegsCpuPBase;
+ void *pvWrapperRegs;
+ IMG_UINT32 ui32BandwidthLimiter;
+ IMG_UINT64 ui64ReadQOSLatency;
+ IMG_UINT64 ui64WriteQOSLatency;
+ IMG_UINT32 ui32MemArbiter;
+ IMG_UINT64 ui64MaxIdOutstanding;
+
+ /* Not used yet; quietens unused-parameter warnings in all build configs. */
+ PVR_UNREFERENCED_PARAMETER(bFullReset);
+
+ sWrapperRegsCpuPBase.uiAddr = registers->start + FPGA_RGX_TB_REG_WRAPPER_OFFSET;
+
+ /*
+ Create a temporary mapping of the FPGA wrapper registers in order to reset
+ required registers.
+ */
+ pvWrapperRegs = OSMapPhysToLin(sWrapperRegsCpuPBase,
+ FPGA_RGX_TB_REG_WRAPPER_SIZE,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ if (pvWrapperRegs == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create wrapper register mapping", __func__));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+ /*
+ Set the bandwidth limiter if required.
+ */
+ ui32BandwidthLimiter = TBBandwidthLimiterGet();
+ if (ui32BandwidthLimiter != 0)
+ {
+ OSWriteHWReg32(pvWrapperRegs, RGX_TB_BW_LIMITER, ui32BandwidthLimiter);
+ (void) OSReadHWReg32(pvWrapperRegs, RGX_TB_BW_LIMITER);
+ PVR_LOG(("%s: Bandwidth limiter = 0x%08X", __func__, OSReadHWReg32(pvWrapperRegs, RGX_TB_BW_LIMITER)));
+ }
+
+ /*
+ Set the read/write QoS latency values if required.
+ */
+ ui64ReadQOSLatency = TBQOSReadLatencyGet();
+ if (ui64ReadQOSLatency != 0)
+ {
+ OSWriteHWReg64(pvWrapperRegs, RGX_TB_QOS_RD_LATENCY, ui64ReadQOSLatency);
+ (void) OSReadHWReg64(pvWrapperRegs, RGX_TB_QOS_RD_LATENCY);
+ PVR_LOG(("%s: QOS Read latency = 0x%016llX", __func__, OSReadHWReg64(pvWrapperRegs, RGX_TB_QOS_RD_LATENCY)));
+ }
+
+ ui64WriteQOSLatency = TBQOSWriteLatencyGet();
+ if (ui64WriteQOSLatency != 0)
+ {
+ OSWriteHWReg64(pvWrapperRegs, RGX_TB_QOS_WR_LATENCY, ui64WriteQOSLatency);
+ (void) OSReadHWReg64(pvWrapperRegs, RGX_TB_QOS_WR_LATENCY);
+ PVR_LOG(("%s: QOS Write latency = 0x%016llX", __func__, OSReadHWReg64(pvWrapperRegs, RGX_TB_QOS_WR_LATENCY)));
+ }
+
+ ui32MemArbiter = TBMemArbiterGet();
+ if (ui32MemArbiter != 0)
+ {
+ OSWriteHWReg32(pvWrapperRegs, RGX_TB_MEM_ARBITER, ui32MemArbiter);
+ (void) OSReadHWReg32(pvWrapperRegs, RGX_TB_MEM_ARBITER);
+ PVR_LOG(("%s: Mem arbiter = 0x%08X", __func__, OSReadHWReg32(pvWrapperRegs, RGX_TB_MEM_ARBITER)));
+ }
+
+ ui64MaxIdOutstanding = TBQOSMaxIdOutstandingGet();
+ if (ui64MaxIdOutstanding != 0)
+ {
+ OSWriteHWReg64(pvWrapperRegs, RGX_TB_MAX_ID_OUTSTANDING, ui64MaxIdOutstanding);
+ (void) OSReadHWReg64(pvWrapperRegs, RGX_TB_MAX_ID_OUTSTANDING);
+ PVR_LOG(("%s: Max Id Outstanding = 0x%016llX", __func__, OSReadHWReg64(pvWrapperRegs, RGX_TB_MAX_ID_OUTSTANDING)));
+ }
+
+ /*
+ Remove the temporary register mapping.
+ */
+ OSUnMapPhysToLin(pvWrapperRegs, FPGA_RGX_TB_REG_WRAPPER_SIZE);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * FPGA_SysDebugInfo
+ *
+ * Dump the current FPGA testbench wrapper register values through the
+ * supplied debug printf callback. Creates (and removes) a temporary
+ * uncached mapping of the wrapper register block.
+ *
+ * Returns PVRSRV_OK, or PVRSRV_ERROR_BAD_MAPPING if the mapping fails.
+ */
+PVRSRV_ERROR FPGA_SysDebugInfo(struct resource *registers,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ IMG_CPU_PHYADDR sWrapperRegsCpuPBase;
+ void *pvWrapperRegs;
+
+ sWrapperRegsCpuPBase.uiAddr = registers->start + FPGA_RGX_TB_REG_WRAPPER_OFFSET;
+
+ /*
+ Create a temporary mapping of the FPGA wrapper registers.
+ */
+ pvWrapperRegs = OSMapPhysToLin(sWrapperRegsCpuPBase,
+ FPGA_RGX_TB_REG_WRAPPER_SIZE,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ if (pvWrapperRegs == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create wrapper register mapping", __func__));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+/* Local helpers: dump one 32/64-bit RGX_TB_* register with its name. */
+#define SYS_FPGA_DBG_R32(R) PVR_DUMPDEBUG_LOG("%-29s : 0x%08X", #R, (IMG_UINT32)OSReadHWReg32(pvWrapperRegs, RGX_TB_##R))
+#define SYS_FPGA_DBG_R64(R) PVR_DUMPDEBUG_LOG("%-29s : 0x%016llX", #R, (IMG_UINT64)OSReadHWReg64(pvWrapperRegs, RGX_TB_##R))
+
+ SYS_FPGA_DBG_R32(BW_LIMITER);
+
+ SYS_FPGA_DBG_R64(QOS_RD_LATENCY);
+ SYS_FPGA_DBG_R64(QOS_WR_LATENCY);
+
+ SYS_FPGA_DBG_R32(MEM_ARBITER);
+ SYS_FPGA_DBG_R64(MAX_ID_OUTSTANDING);
+
+/* Scope the helper macros to this function only. */
+#undef SYS_FPGA_DBG_R32
+#undef SYS_FPGA_DBG_R64
+
+ /*
+ Remove the temporary register mapping.
+ */
+ OSUnMapPhysToLin(pvWrapperRegs, FPGA_RGX_TB_REG_WRAPPER_SIZE);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title FPGA PCI header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines FPGA PCI registers
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/*
+ * Defines for FPGA System Bus Decode.
+ */
+
+/* PCI BAR holding the register block, and its layout. */
+#define FPGA_RGX_TB_REG_PCI_BASENUM 2
+#define FPGA_RGX_TB_REG_SIZE (16 * 1024)
+/* Testbench wrapper registers relative to the register resource start. */
+#define FPGA_RGX_TB_REG_WRAPPER_OFFSET 0x1000000
+#define FPGA_RGX_TB_REG_WRAPPER_SIZE 0x8000
+
+
+/*
+ * Interface to the Reset function
+ */
+PVRSRV_ERROR FPGA_Reset(struct resource *registers, IMG_BOOL bFullReset);
+
+/* Compile-time toggle: flip to the #else branch to stub out device reset. */
+#if 1
+#define DevReset(psSysData, flags) FPGA_Reset((psSysData)->registers, flags)
+#else
+#define DevReset(psSysData, flags) PVRSRV_OK
+#endif
+
+PVRSRV_ERROR FPGA_SysDebugInfo(struct resource *registers,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "sysinfo.h"
+#include "apollo_regs.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_support.h"
+#include "ion_sys.h"
+#endif
+
+#include "tc_drv.h"
+#include "fpga.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/* Physical heap indices into the PHYS_HEAP_CONFIG array.
+ * Must be consecutive and start from 0 */
+#define PHY_HEAP_CARD_GPU 0
+#define PHY_HEAP_CARD_EXT 1
+#if defined(SUPPORT_SECURITY_VALIDATION)
+#define PHY_HEAP_SEC_FW_CODE 2
+#define PHY_HEAP_SEC_FW_DATA 3
+#define PHY_HEAP_SEC_MEM 4
+#define PHY_HEAP_SYSTEM 5
+#else
+#define PHY_HEAP_CARD_FW 2
+#define PHY_HEAP_SYSTEM 3
+#endif
+
+/* Heap counts for pure-LMA and hybrid (LMA + UMA system heap) memory modes. */
+#if defined(SUPPORT_SECURITY_VALIDATION)
+#define PHY_HEAP_LMA_NUM 5
+#define PHY_HEAP_HYBRID_NUM 6
+#else
+#define PHY_HEAP_LMA_NUM 3
+#define PHY_HEAP_HYBRID_NUM 4
+#endif
+
+#if defined(SUPPORT_SECURITY_VALIDATION) && (RGX_NUM_DRIVERS_SUPPORTED > 1)
+#error "Security support and virtualization are currently mutually exclusive."
+#endif
+
+/* Device-side base address of card memory on Odin in hybrid memory mode. */
+#define ODIN_MEMORY_HYBRID_DEVICE_BASE 0x400000000
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+#define SECURE_FW_CODE_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */
+#define SECURE_FW_DATA_MEM_SIZE (0x200000) /* 2MB (max HMMU page size) */
+#define SECURE_MEM_SIZE (0x4000000) /* 32MB (multiple of max HMMU page size) */
+#endif
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+
+/* Fake DVFS configuration used purely for testing purposes */
+
+/* Operating points: {voltage, frequency} pairs — presumably uV/Hz as per
+ * the IMG_OPP convention; TODO confirm units against its declaration. */
+static const IMG_OPP asOPPTable[] =
+{
+ { 8, 25000000},
+ { 16, 50000000},
+ { 32, 75000000},
+ { 64, 100000000},
+};
+
+#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
+
+/* Stub: logs the requested frequency instead of programming hardware.
+ * PVR_DBG_ERROR is used deliberately so the message is visible at
+ * default debug levels. */
+static void SetFrequency(IMG_HANDLE hSysData, IMG_UINT32 ui32Frequency)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
+}
+
+/* Stub: logs the requested voltage instead of programming hardware. */
+static void SetVoltage(IMG_HANDLE hSysData, IMG_UINT32 ui32Voltage)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
+}
+
+#endif
+
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translation callbacks for card-local (LMA) heaps: device addresses are
+ * CPU addresses rebased against tc_memory_base. */
+static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
+};
+
+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translation callbacks for host (UMA) heaps: identity mapping. */
+static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr,
+};
+
+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* Translation callbacks for Odin hybrid mode: rebase against
+ * tc_memory_base plus the ODIN_MEMORY_HYBRID_DEVICE_BASE offset. */
+static PHYS_HEAP_FUNCTIONS gsHybridPhysHeapFuncs =
+{
+ .pfnCpuPAddrToDevPAddr = TCHybridCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = TCHybridDevPAddrToCpuPAddr
+};
+
+typedef struct _SYS_DATA_ SYS_DATA;
+
+/* Per-device system-layer state for the test-chip platform. */
+struct _SYS_DATA_
+{
+ /* Platform device this GPU instance is bound to. */
+ struct platform_device *pdev;
+
+ /* Board description (memory bases/sizes, baseboard, memory mode). */
+ struct tc_rogue_platform_data *pdata;
+
+ /* GPU register resource (also holds the FPGA wrapper block). */
+ struct resource *registers;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ struct ion_client *ion_client;
+ struct ion_handle *ion_rogue_allocation;
+#endif
+};
+
+#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s"
+#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
+#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
+#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */
+#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */
+#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */
+
+/*
+ * Build a heap-allocated, human-readable version string for the device from
+ * the TC system strings. Returns NULL if the strings cannot be queried or
+ * the allocation fails; the caller owns (and must OSFreeMem) the result.
+ */
+static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
+{
+ int err;
+ char str_fpga_rev[FPGA_REV_MAX_LEN]={0};
+ char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0};
+ char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0};
+ char str_pci_ver[PCI_VERSION_MAX_LEN]={0};
+ char str_macro_ver[MACRO_VERSION_MAX_LEN]={0};
+
+ IMG_CHAR *pszVersion;
+ IMG_UINT32 ui32StringLength;
+
+ err = tc_sys_strings(psSysData->pdev->dev.parent,
+ str_fpga_rev, sizeof(str_fpga_rev),
+ str_tcf_core_rev, sizeof(str_tcf_core_rev),
+ str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id),
+ str_pci_ver, sizeof(str_pci_ver),
+ str_macro_ver, sizeof(str_macro_ver));
+ if (err)
+ {
+ return NULL;
+ }
+
+ /* Calculate how much space we need to allocate for the string.
+ * The format string's own "%s" placeholders are counted but then
+ * replaced, so the total over-allocates by two bytes per placeholder —
+ * this slack also covers the NUL terminator. */
+ ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING);
+ ui32StringLength += OSStringLength(str_fpga_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_rev);
+ ui32StringLength += OSStringLength(str_tcf_core_target_build_id);
+ ui32StringLength += OSStringLength(str_pci_ver);
+ ui32StringLength += OSStringLength(str_macro_ver);
+
+ /* Create the version string */
+ pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR));
+ if (pszVersion)
+ {
+ OSSNPrintf(&pszVersion[0], ui32StringLength,
+ SYSTEM_INFO_FORMAT_STRING,
+ str_fpga_rev,
+ str_tcf_core_rev,
+ str_tcf_core_target_build_id,
+ str_pci_ver,
+ str_macro_ver);
+ }
+ else
+ {
+ /* Fixed: previous message claimed the format string creation failed,
+ * but this path is an allocation failure. */
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to allocate version string", __func__));
+ }
+
+ return pszVersion;
+}
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+/* Stashed so IonDeinit/IonDevAcquire can reach the system data without a
+ * handle being threaded through the ION interface. */
+static SYS_DATA *gpsIonPrivateData;
+
+/* Create the ION client and claim the whole rogue ION heap up front;
+ * Services then manages that single allocation. */
+PVRSRV_ERROR IonInit(void *pvPrivateData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYS_DATA *psSysData = pvPrivateData;
+ gpsIonPrivateData = psSysData;
+
+ psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME);
+ if (IS_ERR(psSysData->ion_client))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client)));
+ eError = PVRSRV_ERROR_ION_NO_CLIENT;
+ goto err_out;
+ }
+ /* Allocate the whole rogue ion heap and pass that to services to manage */
+ psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0);
+ if (IS_ERR(psSysData->ion_rogue_allocation))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation)));
+ eError = PVRSRV_ERROR_ION_FAILED_TO_ALLOC;
+ goto err_destroy_client;
+
+ }
+
+ return PVRSRV_OK;
+err_destroy_client:
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+err_out:
+ return eError;
+}
+
+/* Release the rogue heap allocation and the ION client (reverse of IonInit). */
+void IonDeinit(void)
+{
+ SYS_DATA *psSysData = gpsIonPrivateData;
+ ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation);
+ psSysData->ion_rogue_allocation = NULL;
+ ion_client_destroy(psSysData->ion_client);
+ psSysData->ion_client = NULL;
+}
+
+struct ion_device *IonDevAcquire(void)
+{
+ return gpsIonPrivateData->pdata->ion_device;
+}
+
+/* No refcounting: just sanity-check the device being released is ours. */
+void IonDevRelease(struct ion_device *ion_device)
+{
+ PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device);
+}
+#endif /* defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+/* Card-local heap: a device address is the CPU address rebased so the
+ * card memory starts at device address 0. */
+static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr)
+{
+    PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+    SYS_DATA *psSysData = psDevConfig->hSysData;
+    struct tc_rogue_platform_data *psPlatData = psSysData->pdata;
+    IMG_UINT32 i;
+
+    for (i = 0; i < ui32NumOfAddr; i++)
+    {
+        psDevPAddr[i].uiAddr =
+            psCpuPAddr[i].uiAddr - psPlatData->tc_memory_base;
+    }
+}
+
+/* Card-local heap: inverse of TCLocalCpuPAddrToDevPAddr — add the card
+ * memory base back onto the device address. */
+static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                      IMG_UINT32 ui32NumOfAddr,
+                                      IMG_CPU_PHYADDR *psCpuPAddr,
+                                      IMG_DEV_PHYADDR *psDevPAddr)
+{
+    PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+    SYS_DATA *psSysData = psDevConfig->hSysData;
+    struct tc_rogue_platform_data *psPlatData = psSysData->pdata;
+    IMG_UINT32 i;
+
+    for (i = 0; i < ui32NumOfAddr; i++)
+    {
+        psCpuPAddr[i].uiAddr =
+            psDevPAddr[i].uiAddr + psPlatData->tc_memory_base;
+    }
+}
+
+/* Host (UMA) heap: device and CPU physical addresses are identical, so the
+ * translation is a straight bulk copy. The assert documents the layout
+ * assumption the memcpy relies on. */
+static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 uiNumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_ASSERT(sizeof(*psDevPAddr) == sizeof(*psCpuPAddr));
+ OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr));
+}
+
+/* Host (UMA) heap: identity mapping in the opposite direction. */
+static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 uiNumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_ASSERT(sizeof(*psCpuPAddr) == sizeof(*psDevPAddr));
+ OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr));
+}
+
+/* Odin hybrid mode: rebase the CPU address to card-relative, then shift it
+ * up to the device-side window at ODIN_MEMORY_HYBRID_DEVICE_BASE. */
+static void TCHybridCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+                                       IMG_UINT32 ui32NumOfAddr,
+                                       IMG_DEV_PHYADDR *psDevPAddr,
+                                       IMG_CPU_PHYADDR *psCpuPAddr)
+{
+    PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+    SYS_DATA *psSysData = psDevConfig->hSysData;
+    struct tc_rogue_platform_data *psPlatData = psSysData->pdata;
+    IMG_UINT32 i;
+
+    for (i = 0; i < ui32NumOfAddr; i++)
+    {
+        psDevPAddr[i].uiAddr =
+            (psCpuPAddr[i].uiAddr - psPlatData->tc_memory_base) +
+            ODIN_MEMORY_HYBRID_DEVICE_BASE;
+    }
+}
+
+/* Odin hybrid mode: inverse of TCHybridCpuPAddrToDevPAddr — strip the
+ * device-side window offset, then add the CPU-side card memory base. */
+static void TCHybridDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+                                       IMG_UINT32 ui32NumOfAddr,
+                                       IMG_CPU_PHYADDR *psCpuPAddr,
+                                       IMG_DEV_PHYADDR *psDevPAddr)
+{
+    PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData;
+    SYS_DATA *psSysData = psDevConfig->hSysData;
+    struct tc_rogue_platform_data *psPlatData = psSysData->pdata;
+    IMG_UINT32 i;
+
+    for (i = 0; i < ui32NumOfAddr; i++)
+    {
+        psCpuPAddr[i].uiAddr =
+            (psDevPAddr[i].uiAddr - ODIN_MEMORY_HYBRID_DEVICE_BASE) +
+            psPlatData->tc_memory_base;
+    }
+}
+
+/*
+ * Fill in one LMA PHYS_HEAP_CONFIG entry.
+ *
+ * uiBaseAddr  - device (card) base address of the heap
+ * uiStartAddr - CPU physical start address of the heap
+ * uiSize      - heap size in bytes
+ * psFuncs     - CPU<->device address translation callbacks
+ * ui32Flags   - PHYS_HEAP_USAGE_* flags
+ *
+ * Always returns PVRSRV_OK; the status return is kept so call sites can
+ * use uniform error handling.
+ */
+static PVRSRV_ERROR
+InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap,
+ IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr,
+ IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs,
+ PHYS_HEAP_USAGE_FLAGS ui32Flags)
+{
+ psPhysHeap->sCardBase.uiAddr = uiBaseAddr;
+ psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr);
+ psPhysHeap->uiSize = uiSize;
+ psPhysHeap->eType = PHYS_HEAP_TYPE_LMA;
+ psPhysHeap->pszPDumpMemspaceName = "LMA";
+ psPhysHeap->psMemFuncs = psFuncs;
+ psPhysHeap->ui32UsageFlags = ui32Flags;
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Carve the card (LMA) memory into the GPU-local heap, the firmware
+ * carveout (or, with SUPPORT_SECURITY_VALIDATION, the secure FW code/data
+ * and secure buffer heaps), and optionally a GPU-private heap when the
+ * GPU-local heap has been restricted. Heap entries are written into
+ * pasPhysHeaps at the PHY_HEAP_* indices; ui64CardAddr/ui64StartAddr are
+ * advanced in lock-step as each region is assigned, so the statement
+ * order below is significant.
+ */
+static PVRSRV_ERROR
+CreateCardGPUHeaps(const SYS_DATA *psSysData,
+ PHYS_HEAP_CONFIG *pasPhysHeaps,
+ PHYS_HEAP_FUNCTIONS *psHeapFuncs)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 ui64CardAddr;
+ IMG_UINT64 ui64StartAddr = psSysData->pdata->rogue_heap_memory_base;
+ IMG_UINT64 ui64OriginalRogueHeapSize = psSysData->pdata->rogue_heap_memory_size;
+ IMG_UINT64 ui64RogueHeapSize = 0;
+ IMG_UINT64 uiReserved;
+#if defined(SUPPORT_SECURITY_VALIDATION)
+ IMG_UINT64 uiTDFWCodeSize = SECURE_FW_CODE_MEM_SIZE;
+ IMG_UINT64 uiTDFWDataSize = SECURE_FW_DATA_MEM_SIZE;
+ IMG_UINT64 uiTDSecBufSize = SECURE_MEM_SIZE;
+#else
+ IMG_UINT64 uiFwCarveoutSize;
+
+#if defined(SUPPORT_AUTOVZ)
+ /* Carveout out enough LMA memory to hold the heaps of
+ * all supported OSIDs and the FW page tables */
+ uiFwCarveoutSize = (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE) +
+ RGX_FIRMWARE_MAX_PAGETABLE_SIZE;
+#elif defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+ /* Carveout out enough LMA memory to hold the heaps of all supported OSIDs */
+ uiFwCarveoutSize = (RGX_NUM_DRIVERS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE);
+#else
+ /* Create a memory carveout just for the Host's Firmware heap.
+ * Guests will allocate their own physical memory. */
+ uiFwCarveoutSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+#endif
+#endif /* defined(SUPPORT_SECURITY_VALIDATION) */
+
+ /* Odin hybrid mode maps card memory at a non-zero device-side base. */
+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN &&
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ ui64CardAddr = ODIN_MEMORY_HYBRID_DEVICE_BASE;
+ }
+ else
+ {
+ ui64CardAddr = 0;
+ }
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+ uiReserved = uiTDFWCodeSize + uiTDFWDataSize + uiTDSecBufSize;
+#else
+ uiReserved = uiFwCarveoutSize;
+#endif
+
+ /* Apphint may cap the mappable GPU-local heap below the full carveout. */
+ ui64RogueHeapSize = SysRestrictGpuLocalPhysheap(ui64OriginalRogueHeapSize);
+
+ if (ui64OriginalRogueHeapSize != ui64RogueHeapSize)
+ {
+ IMG_UINT32 uiDynamicPrivateArrayPos;
+ /* The remainder (minus the reserved carveout) becomes a GPU-private,
+ * non-CPU-mappable heap. */
+ IMG_UINT64 uiPrivHeapSize = ui64OriginalRogueHeapSize - (ui64RogueHeapSize + uiReserved);
+
+ /* Will always be the last entry in the array */
+ uiDynamicPrivateArrayPos =
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID ?
+ PHY_HEAP_HYBRID_NUM : PHY_HEAP_LMA_NUM;
+
+ eError = InitLocalHeap(&pasPhysHeaps[uiDynamicPrivateArrayPos],
+ ui64CardAddr,
+ 0,
+ uiPrivHeapSize,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_GPU_PRIVATE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ ui64CardAddr += uiPrivHeapSize;
+ ui64StartAddr += uiPrivHeapSize;
+ }
+ else
+ {
+ /* No private heap: the reserved carveout just shrinks the local heap. */
+ ui64RogueHeapSize -= uiReserved;
+ }
+
+ eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_GPU],
+ ui64CardAddr,
+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+ ui64RogueHeapSize,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_GPU_LOCAL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ ui64CardAddr += ui64RogueHeapSize;
+ ui64StartAddr += ui64RogueHeapSize;
+
+#if defined(SUPPORT_SECURITY_VALIDATION)
+ /* Setup the secure FW code heap */
+ eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_FW_CODE],
+ ui64CardAddr,
+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+ uiTDFWCodeSize, psHeapFuncs,
+ PHYS_HEAP_USAGE_FW_CODE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ ui64CardAddr += uiTDFWCodeSize;
+ ui64StartAddr += uiTDFWCodeSize;
+
+ /* Setup the secure FW data heap */
+ eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_FW_DATA],
+ ui64CardAddr,
+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+ uiTDFWDataSize, psHeapFuncs,
+ PHYS_HEAP_USAGE_FW_PRIV_DATA);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ ui64CardAddr += uiTDFWDataSize;
+ ui64StartAddr += uiTDFWDataSize;
+
+ /* Setup the secure buffers heap */
+ eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_SEC_MEM],
+ ui64CardAddr,
+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+ uiTDSecBufSize, psHeapFuncs,
+ PHYS_HEAP_USAGE_GPU_SECURE);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ ui64CardAddr += uiTDSecBufSize;
+ ui64StartAddr += uiTDSecBufSize;
+#else
+ /* allocate the Host Driver's Firmware Heap from the reserved carveout */
+ eError = InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_FW],
+ ui64CardAddr,
+ IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ psHeapFuncs,
+ PHYS_HEAP_USAGE_FW_SHARED);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif /* defined(SUPPORT_SECURITY_VALIDATION) */
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Configure the external/display heap (PHY_HEAP_CARD_EXT) from the PDP
+ * carveout described by platform data.
+ *
+ * Simplified: ui64CardAddr was previously initialised to 0 and then
+ * unconditionally reassigned by both branches of the if/else, and the
+ * InitLocalHeap status was relayed through a redundant success check.
+ */
+static PVRSRV_ERROR
+CreateCardEXTHeap(const SYS_DATA *psSysData,
+                  PHYS_HEAP_CONFIG *pasPhysHeaps,
+                  PHYS_HEAP_FUNCTIONS *psHeapFuncs)
+{
+    IMG_UINT64 ui64CardAddr;
+    IMG_UINT64 ui64StartAddr = psSysData->pdata->pdp_heap_memory_base;
+    IMG_UINT64 ui64Size = psSysData->pdata->pdp_heap_memory_size;
+
+    /* Odin hybrid mode maps card memory at a non-zero device-side base. */
+    if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN &&
+        psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+    {
+        ui64CardAddr = ODIN_MEMORY_HYBRID_DEVICE_BASE;
+    }
+    else
+    {
+        ui64CardAddr = 0;
+    }
+
+    /* InitLocalHeap currently cannot fail; propagate its status regardless. */
+    return InitLocalHeap(&pasPhysHeaps[PHY_HEAP_CARD_EXT],
+                         ui64CardAddr,
+                         IMG_CAST_TO_CPUPHYADDR_UINT(ui64StartAddr),
+                         ui64Size, psHeapFuncs,
+                         PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY);
+}
+
+/* Populate the card-memory heaps, selecting the CPU<->device address
+ * translation callbacks appropriate for the board and memory mode. */
+static PVRSRV_ERROR
+InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+    PHYS_HEAP_FUNCTIONS *psHeapFuncs;
+    PVRSRV_ERROR eError;
+
+    if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+    {
+        /* Odin boards need the rebased hybrid translation; other boards in
+         * hybrid mode use the identity (host) translation. */
+        psHeapFuncs = (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN) ?
+                      &gsHybridPhysHeapFuncs : &gsHostPhysHeapFuncs;
+    }
+    else
+    {
+        psHeapFuncs = &gsLocalPhysHeapFuncs;
+    }
+
+    eError = CreateCardGPUHeaps(psSysData, pasPhysHeaps, psHeapFuncs);
+    if (eError != PVRSRV_OK)
+    {
+        return eError;
+    }
+
+    return CreateCardEXTHeap(psSysData, pasPhysHeaps, psHeapFuncs);
+}
+
+/*
+ * Configure the UMA system heap entry (PHY_HEAP_SYSTEM) used in hybrid
+ * memory mode. Always returns PVRSRV_OK.
+ */
+static PVRSRV_ERROR
+InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+ /* psSysData is only read inside PVR_DPF below, which compiles to nothing
+ * when debug printf support is disabled; this macro avoids an
+ * unused-parameter warning in those builds. */
+ PVR_UNREFERENCED_PARAMETER(psSysData);
+
+ pasPhysHeaps[PHY_HEAP_SYSTEM].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[PHY_HEAP_SYSTEM].pszPDumpMemspaceName = "SYSMEM";
+ pasPhysHeaps[PHY_HEAP_SYSTEM].psMemFuncs = &gsHostPhysHeapFuncs;
+ pasPhysHeaps[PHY_HEAP_SYSTEM].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "Initialising CPU_LOCAL UMA Host PhysHeaps with memory mode: %d",
+ psSysData->pdata->mem_mode));
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Initialise every heap entry for the current memory mode: the LMA card
+ * heaps always, plus the UMA system heap in hybrid mode, then attach the
+ * common private-data handle to each entry.
+ */
+static PVRSRV_ERROR
+PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps,
+ void *pvPrivData, IMG_UINT32 ui32NumHeaps)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+
+ eError = InitLocalHeaps(psSysData, pasPhysHeaps);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ eError = InitHostHeaps(psSysData, pasPhysHeaps);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Attach the private-data handle (the device config) that is passed back
+ * to the phys heap callbacks; this is common to all memory modes.
+ * NOTE(review): an earlier comment here claimed heap IDs were fixed up
+ * for multi-testchip systems, but this loop only sets hPrivData.
+ */
+ for (i = 0; i < ui32NumHeaps; i++)
+ {
+ pasPhysHeaps[i].hPrivData = pvPrivData;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* Allocate and initialise the phys heap array for this device; on success
+ * ownership of the array passes to the caller via ppasPhysHeapsOut. */
+static PVRSRV_ERROR
+PhysHeapsCreate(const SYS_DATA *psSysData, PVRSRV_DEVICE_CONFIG *psDevConfig,
+                PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+                IMG_UINT32 *puiPhysHeapCountOut)
+{
+    PHYS_HEAP_CONFIG *pasHeaps;
+    PVRSRV_ERROR eError;
+    IMG_UINT32 ui32HeapCount;
+
+    /* Base heap count depends on the memory mode. */
+    ui32HeapCount = (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) ?
+                    PHY_HEAP_HYBRID_NUM : PHY_HEAP_LMA_NUM;
+
+    /* A GPU-private heap is appended when the GPU-local heap is restricted;
+     * that extra heap is not CPU-mappable. */
+    psDevConfig->bHasNonMappableLocalMemory =
+        SysRestrictGpuLocalAddPrivateHeap() ? IMG_TRUE : IMG_FALSE;
+    if (psDevConfig->bHasNonMappableLocalMemory)
+    {
+        ui32HeapCount++;
+    }
+
+    pasHeaps = OSAllocMem(sizeof(*pasHeaps) * ui32HeapCount);
+    if (pasHeaps == NULL)
+    {
+        return PVRSRV_ERROR_OUT_OF_MEMORY;
+    }
+
+    eError = PhysHeapsInit(psSysData, pasHeaps, psDevConfig, ui32HeapCount);
+    if (eError != PVRSRV_OK)
+    {
+        OSFreeMem(pasHeaps);
+        return eError;
+    }
+
+    *ppasPhysHeapsOut = pasHeaps;
+    *puiPhysHeapCountOut = ui32HeapCount;
+
+    return PVRSRV_OK;
+}
+
+/* Free a device config created by DeviceConfigCreate: its version string
+ * (may be NULL when GetDeviceVersionString failed — hence the guard),
+ * its phys heap array, and the config itself (which also embeds the RGX
+ * data/timing structs in the same allocation). */
+static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if (psDevConfig->pszVersion)
+ {
+ OSFreeMem(psDevConfig->pszVersion);
+ }
+
+ OSFreeMem(psDevConfig->pasPhysHeaps);
+
+ OSFreeMem(psDevConfig);
+}
+
+/*
+ * Translate device physical addresses to DMA addresses for Odin.
+ * On Odin the DMA address space is the host CPU physical space, so each
+ * translation is a DevPAddr->CpuPAddr conversion.
+ */
+static void odinTCDevPhysAddr2DmaAddr(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_DMA_ADDR *psDmaAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL *pbValid,
+ IMG_UINT32 ui32NumAddr,
+ IMG_BOOL bSparseAlloc)
+{
+ IMG_CPU_PHYADDR sCpuPAddr = {0};
+ IMG_UINT32 ui32Idx;
+
+ /* Fast path */
+ /* NOTE(review): the non-sparse path converts only the first address —
+ * presumably the allocation is physically contiguous so the base is
+ * sufficient; confirm against callers before relying on ui32NumAddr
+ * here. */
+ if (!bSparseAlloc)
+ {
+ /* In Odin, DMA address space is the same as host CPU */
+ TCLocalDevPAddrToCpuPAddr(psDevConfig,
+ 1,
+ &sCpuPAddr,
+ psDevPAddr);
+ psDmaAddr->uiAddr = sCpuPAddr.uiAddr;
+ }
+ else
+ {
+ /* Sparse allocation: translate each valid page individually. */
+ for (ui32Idx = 0; ui32Idx < ui32NumAddr; ui32Idx++)
+ {
+ if (pbValid[ui32Idx])
+ {
+ TCLocalDevPAddrToCpuPAddr(psDevConfig,
+ 1,
+ &sCpuPAddr,
+ &psDevPAddr[ui32Idx]);
+ psDmaAddr[ui32Idx].uiAddr = sCpuPAddr.uiAddr;
+ }
+ else
+ {
+ /* Invalid DMA address marker */
+ psDmaAddr[ui32Idx].uiAddr = ~((IMG_UINT64)0x0);
+ }
+ }
+ }
+}
+
+/* Acquire a named DMA channel; channels are owned by the parent TC device. */
+static void * odinTCgetCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, char *name)
+{
+    struct device *psParent = ((struct device *)psDevConfig->pvOSDevice)->parent;
+
+    return tc_dma_chan(psParent, name);
+}
+
+/* Return a DMA channel obtained via odinTCgetCDMAChan to the parent TC
+ * device. */
+static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig,
+                               void* channel)
+{
+    struct device *psParent = ((struct device *)psDevConfig->pvOSDevice)->parent;
+
+    tc_dma_chan_free(psParent, (struct dma_chan *)channel);
+}
+/* Allocate and populate the PVRSRV_DEVICE_CONFIG for the TC board.
+ * The config, RGX data and RGX timing info are carved out of one zeroed
+ * allocation; the physical heap array is allocated separately by
+ * PhysHeapsCreate(). On success *ppsDevConfigOut owns everything and must
+ * be released with DeviceConfigDestroy().
+ */
+static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
+ PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiPhysHeapCount;
+ PVRSRV_ERROR eError;
+
+ /* Single zeroed allocation holding config + RGX data + timing info
+ * back-to-back; the two IMG_OFFSET_ADDR calls below point into it. */
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRGXData = IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig));
+ psRGXTimingInfo = IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData));
+
+ eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeDevConfig;
+ }
+
+ /* Setup RGX specific timing data */
+ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) /
+ tc_core_clock_multiplex(&psSysData->pdev->dev);
+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Setup the device config */
+ psDevConfig->pvOSDevice = &psSysData->pdev->dev;
+ psDevConfig->pszName = "tc";
+ psDevConfig->pszVersion = GetDeviceVersionString(psSysData);
+
+ psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start;
+ psDevConfig->ui32RegsSize = resource_size(psSysData->registers);
+
+ psDevConfig->ui32IRQ = TC_INTERRUPT_EXT;
+
+ psDevConfig->pasPhysHeaps = pasPhysHeaps;
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+ /* Odin in hybrid memory mode defaults to CPU-local; everything else
+ * defaults to GPU-local. */
+ if (psSysData->pdata->baseboard == TC_BASEBOARD_ODIN &&
+ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID)
+ {
+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_CPU_LOCAL;
+ }
+ else
+ {
+ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+ }
+
+ /* Only required for LMA but having this always set shouldn't be a problem */
+ psDevConfig->bDevicePA0IsValid = IMG_TRUE;
+
+ psDevConfig->hDevData = psRGXData;
+ psDevConfig->hSysData = psSysData;
+
+ psDevConfig->pfnSysDevFeatureDepInit = NULL;
+
+#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
+ /* Fake DVFS configuration used purely for testing purposes */
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+#endif
+#if defined(SUPPORT_LINUX_DVFS)
+ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000;
+ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
+
+ /* DMA channel config */
+ psDevConfig->pfnSlaveDMAGetChan = odinTCgetCDMAChan;
+ psDevConfig->pfnSlaveDMAFreeChan = odinTCFreeCDMAChan;
+ psDevConfig->pfnDevPhysAddr2DmaAddr = odinTCDevPhysAddr2DmaAddr;
+ psDevConfig->pszDmaTxChanName = psSysData->pdata->tc_dma_tx_chan_name;
+ psDevConfig->pszDmaRxChanName = psSysData->pdata->tc_dma_rx_chan_name;
+ psDevConfig->bHasDma = true;
+ /* Following two values are expressed in number of bytes */
+ psDevConfig->ui32DmaTransferUnit = 1;
+ psDevConfig->ui32DmaAlignment = 1;
+
+ *ppsDevConfigOut = psDevConfig;
+
+ return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+ /* Only reachable before pasPhysHeaps/pszVersion are assigned, so a plain
+ * free of the combined allocation is sufficient here. */
+ OSFreeMem(psDevConfig);
+ return eError;
+}
+
+/* System-layer device initialisation for the TC board.
+ * Allocates the SYS_DATA, enables the TC PCI device, locates and reserves
+ * the Rogue register region, resets the device, and builds the device
+ * config. On failure every acquired resource is released via the goto
+ * cleanup chain; on success ownership passes to *ppsDevConfig (released
+ * by SysDevDeInit()).
+ */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ SYS_DATA *psSysData;
+ resource_size_t uiRegistersSize;
+ PVRSRV_ERROR eError;
+ int err = 0;
+
+ PVR_ASSERT(pvOSDevice);
+
+ psSysData = OSAllocZMem(sizeof(*psSysData));
+ if (psSysData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psSysData->pdev = to_platform_device((struct device *)pvOSDevice);
+ psSysData->pdata = psSysData->pdev->dev.platform_data;
+
+ /*
+ * The device cannot address system memory, so there is no DMA
+ * limitation.
+ */
+ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL)
+ {
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(64));
+ }
+ else
+ {
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(32));
+ }
+
+ err = tc_enable(psSysData->pdev->dev.parent);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+ goto ErrFreeSysData;
+ }
+
+ psSysData->registers = platform_get_resource_byname(psSysData->pdev,
+ IORESOURCE_MEM,
+ "rogue-regs");
+ if (!psSysData->registers)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get Rogue register information",
+ __func__));
+ eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
+ goto ErrorDevDisable;
+ }
+
+ /* Check the address range is large enough. */
+ uiRegistersSize = resource_size(psSysData->registers);
+ if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)",
+ __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE));
+
+ eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL;
+ goto ErrorDevDisable;
+ }
+
+ /* Reserve the address range */
+ if (!request_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers),
+ SYS_RGX_DEV_NAME))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Rogue register memory region not available",
+ __func__));
+ eError = PVRSRV_ERROR_PCI_CALL_FAILED;
+
+ goto ErrorDevDisable;
+ }
+
+ /*
+ * Reset the device as required.
+ */
+ eError = DevReset(psSysData, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't reset device", __func__));
+ goto ErrorDevDisable;
+ }
+
+ eError = DeviceConfigCreate(psSysData, &psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorReleaseMemRegion;
+ }
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ eError = IonInit(psSysData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__));
+ goto ErrorDeviceConfigDestroy;
+ }
+#endif
+
+ /* Set psDevConfig->pfnSysDevErrorNotify callback */
+ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
+
+ *ppsDevConfig = psDevConfig;
+
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ErrorDeviceConfigDestroy:
+ DeviceConfigDestroy(psDevConfig);
+#endif
+ErrorReleaseMemRegion:
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ErrorDevDisable:
+ tc_disable(psSysData->pdev->dev.parent);
+ErrFreeSysData:
+ OSFreeMem(psSysData);
+ return eError;
+}
+
+/* Undo SysDevInit() for the TC board, releasing resources in reverse
+ * order of acquisition: ION (where enabled), the device config, the
+ * register memory region, the TC device, and finally the SYS_DATA.
+ */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ IonDeinit();
+#endif
+
+ DeviceConfigDestroy(psDevConfig);
+
+ release_mem_region(psSysData->registers->start,
+ resource_size(psSysData->registers));
+ tc_disable(psSysData->pdev->dev.parent);
+
+ OSFreeMem(psSysData);
+}
+
+/* Dump TC system-level debug information (chip temperature, PLL status
+ * and FPGA registers) through the supplied debug printf callback.
+ * On TCF5 builds this is a no-op that returns PVRSRV_OK.
+ */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+#if defined(TC_APOLLO_TCF5)
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ /* Previously missing: avoid an unused-parameter warning on TCF5 */
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+#else
+ SYS_DATA *psSysData = psDevConfig->hSysData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ u32 tmp = 0;
+ u32 pll;
+
+ PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------");
+
+ /* Best-effort: if the TC cannot report system info, return PVRSRV_OK */
+ if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll))
+ goto err_out;
+
+ if (tmp > 0)
+ PVR_DUMPDEBUG_LOG("Chip temperature: %u degrees C", tmp); /* %u: tmp is u32 */
+ PVR_DUMPDEBUG_LOG("PLL status: %x", pll);
+
+ eError = FPGA_SysDebugInfo(psSysData->registers,
+ pfnDumpDebugPrintf,
+ pvDumpDebugFile);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't dump registers from device", __func__));
+ goto err_out;
+ }
+
+err_out:
+ return eError;
+#endif
+}
+
+/* Book-keeping for one installed device LISR (low-level ISR). */
+typedef struct
+{
+ struct device *psDev; /* TC parent device that owns the interrupt */
+ int iInterruptID; /* TC interrupt identifier (TC_INTERRUPT_EXT) */
+ void *pvData; /* Caller context handed back to pfnLISR */
+ PFN_LISR pfnLISR; /* Services LISR invoked on each interrupt */
+} LISR_DATA;
+
+/* Trampoline registered with the TC interrupt layer: unwraps the
+ * LISR_DATA and forwards the interrupt to the services LISR.
+ */
+static void TCInterruptHandler(void* pvData)
+{
+ LISR_DATA *psLISRData = pvData;
+ psLISRData->pfnLISR(psLISRData->pvData);
+}
+
+/* Install a device LISR on the TC external interrupt.
+ * Only TC_INTERRUPT_EXT is supported. Allocates a LISR_DATA record,
+ * registers TCInterruptHandler with the TC layer and enables the
+ * interrupt. On success *phLISRData receives the handle to pass to
+ * SysUninstallDeviceLISR(); on failure all partial state is rolled back.
+ */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+ LISR_DATA *psLISRData;
+ PVRSRV_ERROR eError;
+ int err;
+
+ if (ui32IRQ != TC_INTERRUPT_EXT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ));
+ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ }
+
+ psLISRData = OSAllocZMem(sizeof(*psLISRData));
+ if (!psLISRData)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ psLISRData->pfnLISR = pfnLISR;
+ psLISRData->pvData = pvData;
+ psLISRData->iInterruptID = ui32IRQ;
+ psLISRData->psDev = psSysData->pdev->dev.parent;
+
+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_free_data;
+ }
+
+ err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err));
+ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR;
+ goto err_unset_interrupt_handler;
+ }
+
+ *phLISRData = psLISRData;
+ eError = PVRSRV_OK;
+
+ PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " to irq %u", pfnLISR, ui32IRQ));
+
+err_out:
+ return eError;
+err_unset_interrupt_handler:
+ /* Roll back the handler registration installed above */
+ tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+err_free_data:
+ OSFreeMem(psLISRData);
+ goto err_out;
+}
+
+/* Uninstall a device LISR installed by SysInstallDeviceLISR().
+ * Disables the interrupt, clears the TC handler registration (both
+ * best-effort: failures are logged but not propagated) and frees the
+ * LISR_DATA. Always returns PVRSRV_OK.
+ */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ LISR_DATA *psLISRData = (LISR_DATA *) hLISRData;
+ int err;
+
+ err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err));
+ }
+
+ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err));
+ }
+
+ PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " from irq %u", psLISRData->pfnLISR, psLISRData->iInterruptID));
+
+ OSFreeMem(psLISRData);
+
+ return PVRSRV_OK;
+}
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System specific poll/timeout details */
+#if defined(VIRTUAL_PLATFORM)
+#define MAX_HW_TIME_US (240000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000)
+#else
+#define MAX_HW_TIME_US (20000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000) // (1500)
+#endif
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#define SYS_RGX_DEV_NAME "tc_rogue"
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+$(PVRSRVKM_NAME)-y += services/system/volcanic/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/common/sysconfig_cmn.o \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+ services/system/volcanic/common/vmm_type_$(VMM_TYPE).o
+
+ifeq ($(SUPPORT_ION),1)
+$(PVRSRVKM_NAME)-y += services/system/common/env/linux/ion_support_generic.o
+endif
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "syscommon.h"
+#include "vz_vmm_pvz.h"
+#include "allocmem.h"
+#include "sysinfo.h"
+#include "sysconfig.h"
+#include "physheap.h"
+#include "pvr_debug.h"
+#if defined(SUPPORT_ION)
+#include "ion_support.h"
+#endif
+#if defined(__linux__)
+#include <linux/dma-mapping.h>
+#endif
+/*
+ * In systems that support trusted device address protection, there are three
+ * physical heaps from which pages should be allocated:
+ * - one heap for normal allocations
+ * - one heap for allocations holding META code memory
+ * - one heap for allocations holding secured DRM data
+ */
+
+#if defined(SUPPORT_VALIDATION) && defined(PDUMP)
+#include "validation_soc.h"
+#endif
+
+/* Change to test CPU_LOCAL sys layers*/
+#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_GPU_LOCAL
+//#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_CPU_LOCAL
+#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_GPU_LOCAL
+//#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_CPU_LOCAL
+
+/*
+ CPU to Device physical address translation
+*/
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ /* On this UMA system the device and CPU physical address spaces are
+ * identical, so translation is a straight copy of each address. */
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/*
+ Device to CPU physical address translation
+*/
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ /* Inverse of UMAPhysHeapCpuPAddrToDevPAddr: address spaces are
+ * identical, so each device address is copied verbatim. */
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+/* UMA heap callbacks shared by every heap created below; both
+ * translations are identity copies on this system. */
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs =
+{
+ /* pfnCpuPAddrToDevPAddr */
+ UMAPhysHeapCpuPAddrToDevPAddr,
+ /* pfnDevPAddrToCpuPAddr */
+ UMAPhysHeapDevPAddrToCpuPAddr,
+};
+
+/* Build the physical heap configuration array for the nohw system.
+ * Always creates one UMA GPU/CPU-local heap; adds trusted-device or
+ * dedicated-FW heaps when the corresponding build options are set, and a
+ * firmware-shared heap when not running in native VZ mode. On success the
+ * caller owns *ppasPhysHeapsOut and frees it with PhysHeapsDestroy().
+ */
+static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
+ IMG_UINT32 *puiPhysHeapCountOut)
+{
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 ui32NextHeapID = 0;
+ IMG_UINT32 uiHeapCount = 1;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ uiHeapCount += 2;
+#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
+ uiHeapCount += 1;
+#endif
+ /* Non-native VZ modes need an extra FW-shared heap */
+ uiHeapCount += !PVRSRV_VZ_MODE_IS(NATIVE) ? 1:0;
+
+ pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount);
+ if (!pasPhysHeaps)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Heap 0: general UMA heap (GPU_LOCAL or CPU_LOCAL per build flag) */
+ pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM";
+ pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = UMA_HEAP_USAGE_FLAG;
+ ui32NextHeapID++;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "TDFWMEM";
+ pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[ui32NextHeapID].ui32UsageFlags =
+ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+ ui32NextHeapID++;
+
+ pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "TDSECBUFMEM";
+ pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[ui32NextHeapID].ui32UsageFlags =
+ PHYS_HEAP_USAGE_GPU_SECURE;
+ ui32NextHeapID++;
+
+#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
+ pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "DEDICATEDFWMEM";
+ pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[ui32NextHeapID].ui32UsageFlags =
+ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA;
+ ui32NextHeapID++;
+#endif
+
+ if (! PVRSRV_VZ_MODE_IS(NATIVE))
+ {
+ pasPhysHeaps[ui32NextHeapID].pszPDumpMemspaceName = "SYSMEM";
+ pasPhysHeaps[ui32NextHeapID].eType = PHYS_HEAP_TYPE_UMA;
+ pasPhysHeaps[ui32NextHeapID].psMemFuncs = &gsPhysHeapFuncs;
+ pasPhysHeaps[ui32NextHeapID].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED;
+ ui32NextHeapID++;
+ }
+
+ *ppasPhysHeapsOut = pasPhysHeaps;
+ *puiPhysHeapCountOut = uiHeapCount;
+
+ return PVRSRV_OK;
+}
+
+/* Free the heap array allocated by PhysHeapsCreate(). */
+static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps)
+{
+ OSFreeMem(pasPhysHeaps);
+}
+
+/* System-layer device initialisation for the no-hardware system.
+ * Builds a device config with fabricated register base/IRQ values (no
+ * real device exists), the UMA physical heaps, and fixed timing data.
+ * On success *ppsDevConfig owns the config; release with SysDevDeInit().
+ */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ RGX_DATA *psRGXData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 uiPhysHeapCount;
+ PVRSRV_ERROR eError;
+
+#if defined(__linux__)
+ dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
+#endif
+
+ /* Single zeroed allocation: config + RGX data + timing info */
+ psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+ sizeof(*psRGXData) +
+ sizeof(*psRGXTimingInfo));
+ if (!psDevConfig)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRGXData = IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig));
+ psRGXTimingInfo = IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData));
+
+ eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount);
+ if (eError != PVRSRV_OK) /* explicit check, consistent with the rest of the file */
+ {
+ goto ErrorFreeDevConfig;
+ }
+
+ /* Setup RGX specific timing data */
+ psRGXTimingInfo->ui32CoreClockSpeed = RGX_NOHW_CORE_CLOCK_SPEED;
+ psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
+
+ /* Set up the RGX data */
+ psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+ /* Setup the device config */
+ psDevConfig->pvOSDevice = pvOSDevice;
+ psDevConfig->pszName = "nohw";
+ psDevConfig->pszVersion = NULL;
+
+ /* Device setup information: placeholder values, no real hardware */
+ psDevConfig->sRegsCpuPBase.uiAddr = 0x00f00baa;
+ psDevConfig->ui32IRQ = 0x00000bad;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
+ psDevConfig->ui32RegsSize = RGX_VIRTUALISATION_REG_SIZE_PER_OS * GPUVIRT_VALIDATION_NUM_OS;
+#else
+ psDevConfig->ui32RegsSize = 0x4000;
+#endif
+
+ psDevConfig->pasPhysHeaps = pasPhysHeaps;
+ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
+
+ psDevConfig->eDefaultHeap = UMA_DEFAULT_HEAP;
+
+ /* No power management on no HW system */
+ psDevConfig->pfnPrePowerState = NULL;
+ psDevConfig->pfnPostPowerState = NULL;
+
+ /* No clock frequency either */
+ psDevConfig->pfnClockFreqGet = NULL;
+
+ psDevConfig->hDevData = psRGXData;
+
+ psDevConfig->bDevicePA0IsValid = IMG_FALSE;
+ psDevConfig->pfnSysDevFeatureDepInit = NULL;
+
+ /* Setup other system specific stuff */
+#if defined(SUPPORT_ION)
+ IonInit(NULL);
+#endif
+
+ /* Pdump validation system registers */
+#if defined(SUPPORT_VALIDATION) && defined(PDUMP)
+ {
+ PVRSRV_DEVICE_NODE* psDeviceNode;
+
+ /* Pretend we are calling for device 0 for now
+ (sDeviceNode.sDevId.ui32InternalID is 0 above)
+ until this code can be moved elsewhere.
+ */
+ psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+ PVRSRVConfigureSysCtrl(psDeviceNode, NULL, PDUMP_FLAGS_CONTINUOUS);
+#if defined(SUPPORT_SECURITY_VALIDATION)
+ PVRSRVConfigureTrustedDevice(psDeviceNode, NULL, PDUMP_FLAGS_CONTINUOUS);
+#endif
+ OSFreeMem(psDeviceNode);
+ }
+#endif
+
+ psDevConfig->bHasFBCDCVersion31 = IMG_TRUE;
+
+ /* Set psDevConfig->pfnSysDevErrorNotify callback */
+ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
+
+ *ppsDevConfig = psDevConfig;
+
+ return PVRSRV_OK;
+
+ErrorFreeDevConfig:
+ OSFreeMem(psDevConfig);
+ return eError;
+}
+
+/* Undo SysDevInit() for the nohw system: deinitialise ION (if enabled),
+ * free the heap array and the combined device-config allocation.
+ */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_ION)
+ IonDeinit();
+#endif
+
+ PhysHeapsDestroy(psDevConfig->pasPhysHeaps);
+ OSFreeMem(psDevConfig);
+}
+
+/* No-op LISR installation: there is no hardware and hence no interrupt
+ * to hook. Always succeeds.
+ */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+ PVR_UNREFERENCED_PARAMETER(ui32IRQ);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(pfnLISR);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ PVR_UNREFERENCED_PARAMETER(phLISRData);
+
+ return PVRSRV_OK;
+}
+
+/* No-op LISR removal, matching the no-op install above. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ PVR_UNREFERENCED_PARAMETER(hLISRData);
+
+ return PVRSRV_OK;
+}
+
+/* No system-level debug information on the no-hardware system. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSCCONFIG_H)
+#define SYSCCONFIG_H
+
+/* Includes moved inside the guard so repeated inclusion is a no-op;
+ * previously they sat before it and were re-processed on every include. */
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+/* Fixed core clock reported for the no-hardware system (100 MHz). */
+#define RGX_NOHW_CORE_CLOCK_SPEED 100000000
+/* No active power management on nohw, so zero latency. */
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (0)
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* SYSCCONFIG_H */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SYSINFO_H)
+#define SYSINFO_H
+
+/*!< System specific poll/timeout details */
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME "rgxnohw"
+#endif
+
+#endif /* !defined(SYSINFO_H) */
--- /dev/null
+########################################################################### ###
+#@File
+#@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+#@License Dual MIT/GPLv2
+#
+# The contents of this file are subject to the MIT license as set out below.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# Alternatively, the contents of this file may be used under the terms of
+# the GNU General Public License Version 2 ("GPL") in which case the provisions
+# of GPL are applicable instead of those above.
+#
+# If you wish to allow use of your version of this file only under the terms of
+# GPL, and not to allow others to use your version of this file under the terms
+# of the MIT license, indicate your decision by deleting the provisions above
+# and replace them with the notice and other provisions required by GPL as set
+# out in the file called "GPL-COPYING" included in this distribution. If you do
+# not delete the provisions above, a recipient may use your version of this file
+# under the terms of either the MIT license or GPL.
+#
+# This License is also included in this distribution in the file called
+# "MIT-COPYING".
+#
+# EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+# PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+### ###########################################################################
+PVRSRVKM_NAME = $(PVRSRV_MODNAME)
+
+# System-layer objects for this platform port (sysconfig, VZ validation,
+# and the shared Linux interrupt-support helper).
+$(PVRSRVKM_NAME)-y += services/system/$(PVR_SYSTEM)/sysconfig.o \
+ services/system/$(PVR_SYSTEM)/vz_validation.o \
+ services/system/common/env/linux/interrupt_support.o \
+
+# Virtualization (VZ) para-virtualization client/server glue, common to
+# all system ports.
+$(PVRSRVKM_NAME)-y += \
+ services/server/common/vmm_pvz_client.o \
+ services/server/common/vmm_pvz_server.o \
+ services/server/common/vz_vmm_pvz.o \
+ services/server/common/vz_vmm_vm.o \
+
+# GPU-architecture-specific DMA support and the hypervisor (VMM) backend
+# selected by $(VMM_TYPE).
+ifeq ($(PVR_ARCH),rogue)
+ $(PVRSRVKM_NAME)-y += \
+ services/system/rogue/common/env/linux/dma_support.o \
+ services/system/rogue/common/vmm_type_$(VMM_TYPE).o
+ else
+ $(PVRSRVKM_NAME)-y += \
+ services/system/volcanic/common/env/linux/dma_support.o \
+ services/system/volcanic/common/vmm_type_$(VMM_TYPE).o
+endif
--- /dev/null
+/*************************************************************************/ /*!
+@File sysconfig.c
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "img_defs.h"
+#include "physheap.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+#include "interrupt_support.h"
+#include "sysconfig.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "vz_validation.h"
+#endif
+
+#define VZ_EXAMPLE_SYSTEM_NAME "vz_example"
+
+static void SysCpuPAToDevPA(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPA, IMG_CPU_PHYADDR *psCpuPA);
+static void SysDevPAToCpuPA(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPA, IMG_DEV_PHYADDR *psDevPA);
+
+/* Per-device system data. Stores the handle of the system-level LISR
+ * installed with the OS, plus the device LISR callback/context that
+ * SystemISRHandler forwards interrupts to. */
+typedef struct _SYS_DATA_
+{
+ IMG_HANDLE hSysLISRData;
+ PFN_LISR pfnDeviceLISR;
+ void *pvDeviceLISRData;
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ /* Handle for the MPU watchdog thread created in DeviceConfigCreate. */
+ IMG_HANDLE hValidationData;
+#endif
+} SYS_DATA;
+
+/* Indices into the psDevConfig->pasPhysHeaps array allocated in
+ * DeviceConfigCreate; PHYS_HEAP_IDX_COUNT sizes that allocation. */
+typedef enum _PHYS_HEAP_IDX_
+{
+ PHYS_HEAP_IDX_SYSMEM,
+ PHYS_HEAP_IDX_FIRMWARE,
+#if defined(RGX_PREMAP_FW_HEAPS)
+ PHYS_HEAP_IDX_FW_PT,
+#endif
+ PHYS_HEAP_IDX_COUNT,
+} PHYS_HEAP_IDX;
+
+/* CPU<->device physical address translation callbacks shared by all
+ * physical heaps on this system (identity mapping, see below). */
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = {
+ SysCpuPAToDevPA, /* pfnCpuPAddrToDevPAddr */
+ SysDevPAToCpuPA, /* pfnDevPAddrToCpuPAddr */
+};
+
+/*
+ * CPU Physical to Device Physical address translation:
+ * Template implementation below assumes CPU and GPU views of physical memory are identical
+ */
+/*
+ * CPU Physical to Device Physical address translation:
+ * This system assumes the CPU and GPU views of physical memory are
+ * identical, so the translation is a straight copy.
+ *
+ * Note: the previous implementation wrote element [0] unconditionally,
+ * which is an out-of-bounds write when ui32NumOfAddr is 0; the bounded
+ * loop below handles every count, including 0, safely.
+ */
+static void SysCpuPAToDevPA(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr,
+                            IMG_DEV_PHYADDR *psDevPA, IMG_CPU_PHYADDR *psCpuPA)
+{
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psDevPA[ui32Idx].uiAddr = psCpuPA[ui32Idx].uiAddr;
+	}
+}
+
+/*
+ * Device Physical to CPU Physical address translation:
+ * Template implementation below assumes CPU and GPU views of physical memory are identical
+ */
+/*
+ * Device Physical to CPU Physical address translation:
+ * This system assumes the CPU and GPU views of physical memory are
+ * identical, so the translation is a straight copy.
+ *
+ * Note: the previous implementation wrote element [0] unconditionally,
+ * which is an out-of-bounds write when ui32NumOfAddr is 0; the bounded
+ * loop below handles every count, including 0, safely.
+ */
+static void SysDevPAToCpuPA(IMG_HANDLE hPrivData, IMG_UINT32 ui32NumOfAddr,
+                            IMG_CPU_PHYADDR *psCpuPA, IMG_DEV_PHYADDR *psDevPA)
+{
+	IMG_UINT32 ui32Idx;
+
+	PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+	for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+	{
+		psCpuPA[ui32Idx].uiAddr = psDevPA[ui32Idx].uiAddr;
+	}
+}
+
+/* System-level interrupt handler registered with OSInstallSystemLISR.
+ * Forwards every interrupt to the device LISR stored in SYS_DATA and
+ * returns whether the device handled it. */
+static IMG_BOOL SystemISRHandler(void *pvData)
+{
+ SYS_DATA *psSysData = pvData;
+ IMG_BOOL bHandled;
+
+ /* Any special system interrupt handling goes here */
+
+ bHandled = psSysData->pfnDeviceLISR(psSysData->pvDeviceLISRData);
+ return bHandled;
+}
+
+/* Install the device's low-level interrupt handler.
+ *
+ * hSysData   - SYS_DATA created in DeviceConfigCreate
+ * ui32IRQ    - Linux IRQ number obtained from platform_get_irq
+ * pszName    - human-readable name used in logs
+ * pfnLISR    - device LISR to be wrapped by SystemISRHandler
+ * pvData     - context passed back to pfnLISR
+ * phLISRData - on success, receives the handle to pass to
+ *              SysUninstallDeviceLISR
+ *
+ * Returns PVRSRV_ERROR_CANT_REGISTER_CALLBACK if a LISR is already
+ * installed, or the OSInstallSystemLISR error code on failure. */
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)hSysData;
+ PVRSRV_ERROR eError;
+
+ /* Only one device LISR may be installed at a time. */
+ if (psSysData->hSysLISRData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: ISR for %s already installed!", __func__, pszName));
+ return PVRSRV_ERROR_CANT_REGISTER_CALLBACK;
+ }
+
+ /* Wrap the device LISR */
+ psSysData->pfnDeviceLISR = pfnLISR;
+ psSysData->pvDeviceLISRData = pvData;
+
+ eError = OSInstallSystemLISR(&psSysData->hSysLISRData, ui32IRQ, pszName,
+ SystemISRHandler, psSysData,
+ SYS_IRQ_FLAG_TRIGGER_DEFAULT);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* The SYS_DATA itself doubles as the LISR handle. */
+ *phLISRData = psSysData;
+
+ PVR_LOG(("Installed device LISR %s on IRQ %d", pszName, ui32IRQ));
+
+ return PVRSRV_OK;
+}
+
+/* Uninstall the device LISR previously installed by SysInstallDeviceLISR.
+ * hLISRData is the handle returned via phLISRData (i.e. the SYS_DATA).
+ * On success the interrupt bookkeeping is cleared so a new LISR can be
+ * installed. */
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ SYS_DATA *psSysData = (SYS_DATA *)hLISRData;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSysData);
+
+ eError = OSUninstallSystemLISR(psSysData->hSysLISRData);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* clear interrupt data */
+ psSysData->pfnDeviceLISR = NULL;
+ psSysData->pvDeviceLISRData = NULL;
+ psSysData->hSysLISRData = NULL;
+
+ return PVRSRV_OK;
+}
+
+/* System pre-power callback: on a transition away from the ON state,
+ * disable the GPU power domain via the (mock) SoC register bank.
+ * No-op for Guest drivers (the Host owns power management) and when
+ * there is no actual state change. */
+static PVRSRV_ERROR SysPrePower(IMG_HANDLE hSysData,
+                                PVRSRV_SYS_POWER_STATE eNewPowerState,
+                                PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+                                PVRSRV_POWER_FLAGS ePwrFlags)
+{
+	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+	PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", PVRSRV_OK);
+
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+	PVR_UNREFERENCED_PARAMETER(ePwrFlags);
+
+	/* on powering down */
+	if (eNewPowerState != PVRSRV_SYS_POWER_STATE_ON)
+	{
+		IMG_CPU_PHYADDR sSoCRegBase = {SOC_REGBANK_BASE};
+
+		void* pvSocRegs = OSMapPhysToLin(sSoCRegBase,
+		                                 SOC_REGBANK_SIZE,
+		                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+		/* Guard against a failed register-bank mapping: writing through
+		 * a NULL linear address would fault the kernel. */
+		PVR_LOG_RETURN_IF_FALSE((pvSocRegs != NULL),
+		                        "OSMapPhysToLin failed",
+		                        PVRSRV_ERROR_BAD_MAPPING);
+
+		OSWriteHWReg32(pvSocRegs, POW_DOMAIN_DISABLE_REG, POW_DOMAIN_GPU);
+		OSUnMapPhysToLin(pvSocRegs, SOC_REGBANK_SIZE);
+	}
+
+	return PVRSRV_OK;
+}
+
+/* System post-power callback: on a transition into the ON state, enable
+ * the GPU power domain via the (mock) SoC register bank. No-op for
+ * Guest drivers and when there is no actual state change. */
+static PVRSRV_ERROR SysPostPower(IMG_HANDLE hSysData,
+                                 PVRSRV_SYS_POWER_STATE eNewPowerState,
+                                 PVRSRV_SYS_POWER_STATE eCurrentPowerState,
+                                 PVRSRV_POWER_FLAGS ePwrFlags)
+{
+	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
+	PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", PVRSRV_OK);
+
+	PVR_UNREFERENCED_PARAMETER(hSysData);
+	PVR_UNREFERENCED_PARAMETER(ePwrFlags);
+
+	/* on powering up */
+	if (eCurrentPowerState != PVRSRV_SYS_POWER_STATE_ON)
+	{
+		IMG_CPU_PHYADDR sSoCRegBase = {SOC_REGBANK_BASE};
+
+		void* pvSocRegs = OSMapPhysToLin(sSoCRegBase,
+		                                 SOC_REGBANK_SIZE,
+		                                 PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+		/* Guard against a failed register-bank mapping: writing through
+		 * a NULL linear address would fault the kernel. */
+		PVR_LOG_RETURN_IF_FALSE((pvSocRegs != NULL),
+		                        "OSMapPhysToLin failed",
+		                        PVRSRV_ERROR_BAD_MAPPING);
+
+		OSWriteHWReg32(pvSocRegs, POW_DOMAIN_ENABLE_REG, POW_DOMAIN_GPU);
+		OSUnMapPhysToLin(pvSocRegs, SOC_REGBANK_SIZE);
+	}
+
+	return PVRSRV_OK;
+}
+
+/* Query the current state of the GPU power domain by reading the (mock)
+ * SoC power-status register. If the register bank cannot be mapped the
+ * state cannot be determined, so OFF is reported conservatively. */
+static PVRSRV_SYS_POWER_STATE RGXGpuDomainPower(PVRSRV_DEVICE_NODE *psDevNode)
+{
+	IMG_CPU_PHYADDR sSoCRegBase = {SOC_REGBANK_BASE};
+	IMG_UINT32 ui32SocDomainPower;
+	bool bGpuDomainIsPowered;
+	void *pvSocRegs;
+
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+	pvSocRegs = OSMapPhysToLin(sSoCRegBase,
+	                           SOC_REGBANK_SIZE,
+	                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+	if (pvSocRegs == NULL)
+	{
+		/* Reading through a NULL mapping would fault; report OFF. */
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSMapPhysToLin failed", __func__));
+		return PVRSRV_SYS_POWER_STATE_OFF;
+	}
+
+	ui32SocDomainPower = OSReadHWReg32(pvSocRegs, POW_DOMAIN_STATUS_REG);
+	bGpuDomainIsPowered = BITMASK_HAS(ui32SocDomainPower, POW_DOMAIN_GPU);
+
+	OSUnMapPhysToLin(pvSocRegs, SOC_REGBANK_SIZE);
+
+	return (bGpuDomainIsPowered) ? PVRSRV_SYS_POWER_STATE_ON : PVRSRV_SYS_POWER_STATE_OFF;
+}
+
+/* Feature-dependent device-config fixup, called once the core's feature
+ * mask is known. This system has no CPU/GPU cache coherency, so snooping
+ * is disabled regardless of the reported features. */
+static void SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features)
+{
+ PVR_UNREFERENCED_PARAMETER(ui64Features);
+ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
+}
+
+/* Determine the VZ driver mode (native/host/guest) for this device from
+ * the "vz-mode" Device Tree property, defaulting to native when the
+ * property is absent or only one driver is supported.
+ * NOTE(review): the u32 DT value is read through a cast to the
+ * PVRSRV_DRIVER_MODE enum — assumes the enum is 32-bit; confirm. */
+static PVRSRV_DRIVER_MODE GetDriverMode(struct platform_device *psDev)
+{
+ PVRSRV_DRIVER_MODE eDriverMode;
+
+#if (RGX_NUM_DRIVERS_SUPPORTED > 1)
+ if (of_property_read_u32(psDev->dev.of_node, "vz-mode", (IMG_UINT32*) &eDriverMode))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot retrieve driver mode from Device Tree. "
+ "Default to native mode.", __func__));
+ eDriverMode = DRIVER_MODE_NATIVE;
+ }
+#else
+ eDriverMode = DRIVER_MODE_NATIVE;
+#endif
+
+ return eDriverMode;
+}
+
+/* Allocate and populate the PVRSRV_DEVICE_CONFIG for this platform device.
+ * A single allocation carries the config plus its RGX data, timing info,
+ * SYS_DATA and physical-heap config array, so one OSFreeMem releases all.
+ *
+ * Fixes over the previous version:
+ *  - *ppsDevConfig was never assigned, leaving the caller (SysDevInit)
+ *    with an uninitialized pointer;
+ *  - psDevConfig leaked on the platform_get_irq/platform_get_resource
+ *    error paths. */
+static PVRSRV_ERROR DeviceConfigCreate(void *pvOSDevice,
+                                       PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+	struct platform_device *psDev;
+	struct resource *dev_res = NULL;
+	int dev_irq;
+	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	RGX_DATA *psRGXData;
+	RGX_TIMING_INFORMATION *psRGXTimingInfo;
+	SYS_DATA *psSysData;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
+	                          sizeof(*psRGXData) +
+	                          sizeof(*psRGXTimingInfo) +
+	                          sizeof(*psSysData) +
+	                          sizeof(*psPhysHeapConfig) * PHYS_HEAP_IDX_COUNT);
+	if (!psDevConfig)
+	{
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	/* Carve the sub-structures out of the single allocation. */
+	psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
+	psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
+	psSysData = (SYS_DATA *)((IMG_CHAR *)psRGXTimingInfo + sizeof(*psRGXTimingInfo));
+	psPhysHeapConfig = (PHYS_HEAP_CONFIG *)((IMG_CHAR *)psSysData + sizeof(*psSysData));
+
+	psDev = to_platform_device((struct device *)pvOSDevice);
+
+	dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
+
+	dev_irq = platform_get_irq(psDev, 0);
+	if (dev_irq < 0)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)", __func__, -dev_irq));
+		/* Free the config; the previous version leaked it here. */
+		OSFreeMem(psDevConfig);
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	dev_res = platform_get_resource(psDev, IORESOURCE_MEM, 0);
+	if (dev_res == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed", __func__));
+		/* Free the config; the previous version leaked it here. */
+		OSFreeMem(psDevConfig);
+		return PVRSRV_ERROR_INVALID_DEVICE;
+	}
+
+	/* Device Setup */
+	psDevConfig->pvOSDevice = pvOSDevice;
+	psDevConfig->pszName = VZ_EXAMPLE_SYSTEM_NAME;
+	psDevConfig->pszVersion = NULL;
+
+	/* Device setup information */
+	psDevConfig->sRegsCpuPBase.uiAddr = dev_res->start;
+	psDevConfig->ui32IRQ = dev_irq;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION_MTS)
+	psDevConfig->ui32RegsSize = RGX_VIRTUALISATION_REG_SIZE_PER_OS * GPUVIRT_VALIDATION_NUM_OS;
+#else
+	psDevConfig->ui32RegsSize = (unsigned int)(dev_res->end - dev_res->start);
+#endif
+
+	PVR_ASSERT(psDevConfig->ui32RegsSize >= RGX_VIRTUALISATION_REG_SIZE_PER_OS);
+
+	/* Power management; Guests defer domain control to the Host. */
+	psDevConfig->pfnPrePowerState = SysPrePower;
+	psDevConfig->pfnPostPowerState = SysPostPower;
+	psDevConfig->pfnGpuDomainPower = PVRSRV_VZ_MODE_IS(GUEST) ? NULL : RGXGpuDomainPower;
+
+	/* Setup RGX specific timing data */
+	psRGXTimingInfo->ui32CoreClockSpeed = DEFAULT_CLOCK_RATE;
+	psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+	psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+	psRGXTimingInfo->ui32ActivePMLatencyms = 0;
+
+	/* Setup RGX specific data */
+	psRGXData->psRGXTimingInfo = psRGXTimingInfo;
+
+	/* Minimal configuration */
+	psDevConfig->pfnClockFreqGet = NULL;
+	psDevConfig->hDevData = psRGXData;
+	psDevConfig->hSysData = psSysData;
+	psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit;
+	psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
+
+	psDevConfig->pasPhysHeaps = psPhysHeapConfig;
+
+	/* If driver mode is not overridden by the apphint, set it here */
+	if (!psPVRSRVData->bForceApphintDriverMode)
+	{
+		psPVRSRVData->eDriverMode = GetDriverMode(psDev);
+	}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	psDevConfig->pfnSysDevVirtInit = SysInitValidation;
+
+	eError = CreateMPUWatchdogThread(&psSysData->hValidationData);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Create MPU watchdog thread failed.", __func__));
+		OSFreeMem(psDevConfig);
+		return eError;
+	}
+#endif
+
+	/* Hand the fully-populated config to the caller; the previous version
+	 * never assigned this, leaving the caller's pointer uninitialized. */
+	*ppsDevConfig = psDevConfig;
+
+	return eError;
+}
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || defined(SUPPORT_GPUVIRT_VALIDATION)
+/* Obtain the IPA of the carveout range reserved for this VM */
+/* Obtain the IPA of the carveout range reserved for this VM.
+ * Reads the "fw-carveout" Device Tree property; falls back to the
+ * compile-time FW_CARVEOUT_IPA_BASE when the property is absent. */
+static IMG_UINT64 GetFwCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ struct platform_device *psDev = to_platform_device((struct device *)psDevConfig->pvOSDevice);
+ IMG_UINT64 ui64BaseAddress;
+
+ if (of_property_read_u64(psDev->dev.of_node, "fw-carveout", &ui64BaseAddress))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot retrieve firmware carveout address from Device Tree."
+ " Using default Base Address: 0x%llX",
+ __func__, FW_CARVEOUT_IPA_BASE));
+ ui64BaseAddress = FW_CARVEOUT_IPA_BASE;
+ }
+
+ return ui64BaseAddress;
+}
+
+#if defined(RGX_PREMAP_FW_HEAPS)
+/* Obtain the IPA of the carveout reserved for the Firmware's page tables.
+ * Reads the "fw-pt-carveout" Device Tree property; falls back to the
+ * compile-time FW_PT_CARVEOUT_IPA_BASE when the property is absent. */
+static IMG_UINT64 GetFwPageTableCarveoutBase(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ struct platform_device *psDev = to_platform_device((struct device *)psDevConfig->pvOSDevice);
+ IMG_UINT64 ui64BaseAddress;
+
+ if (of_property_read_u64(psDev->dev.of_node, "fw-pt-carveout", &ui64BaseAddress))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot retrieve firmware page table carveout address"
+ " from Device Tree. Using default Base Address: 0x%llX",
+ __func__, FW_PT_CARVEOUT_IPA_BASE));
+ ui64BaseAddress = FW_PT_CARVEOUT_IPA_BASE;
+ }
+
+ return ui64BaseAddress;
+}
+#endif
+#endif
+
+/* Populate the device's physical-heap configuration array (allocated in
+ * DeviceConfigCreate). Layout depends on the build:
+ *  - SUPPORT_GPUVIRT_VALIDATION: single LMA heap carved from the FW
+ *    carveout, used for MPU validation;
+ *  - RGX_VZ_STATIC_CARVEOUT_FW_HEAPS: firmware heap is a fixed LMA
+ *    carveout known to Host and Guests at system-planning time;
+ *  - otherwise (dynamic): Guests allocate a DMA buffer for the firmware
+ *    heap, Host/Native use UMA.
+ *
+ * Fix over the previous version: the SysDmaRegisterForIoRemapping
+ * failure path called OSFreeMem(psDmaAlloc) BEFORE SysDmaFreeMem
+ * (use-after-free, and in the wrong release order). */
+static PVRSRV_ERROR PhysHeapCfgCreate(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+	IMG_CPU_PHYADDR sCpuBase;
+	IMG_DEV_PHYADDR sDeviceBase;
+	PHYS_HEAP_CONFIG *psPhysHeapConfig = psDevConfig->pasPhysHeaps;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	/*
+	 * Validation-only driver
+	 * This driver mode is designed to exercise the system's 2nd stage
+	 * memory mapping or protection mechanisms without having a fully
+	 * virtualized system. It is intended to run on pre-production
+	 * emulation devices to validate the correct functioning of VZ
+	 * hardware.
+	 * It uses a native driver that relies on LMA memory.
+	 * The driver partitions its assigned LMA range into equal parts,
+	 * one for each RGX_NUM_DRIVERS_SUPPORTED. Each OSID partition will be
+	 * protected by the 2nd stage memory access control device and
+	 * the OSID based access can be tested according to the steps
+	 * described in the "GPU Virtualization Validation" doc.
+	 */
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].pszPDumpMemspaceName = "SYSMEM";
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].eType = PHYS_HEAP_TYPE_UMA;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].psMemFuncs = &gsPhysHeapFuncs;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].hPrivData = NULL;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+
+	sCpuBase.uiAddr = GetFwCarveoutBase(psDevConfig);
+	SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase);
+
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].eType = PHYS_HEAP_TYPE_LMA;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].sStartAddr = sCpuBase;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].sCardBase = sDeviceBase;
+
+	psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+	psDevConfig->ui32PhysHeapCount = 1;
+#else
+	/* Virtualization driver */
+
+	/* Heap configuration for general use */
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].pszPDumpMemspaceName = "SYSMEM";
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].eType = PHYS_HEAP_TYPE_UMA;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].psMemFuncs = &gsPhysHeapFuncs;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].hPrivData = NULL;
+	psPhysHeapConfig[PHYS_HEAP_IDX_SYSMEM].ui32UsageFlags = PHYS_HEAP_USAGE_GPU_LOCAL;
+
+	/* Heap configuration for memory shared with the firmware */
+	psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].pszPDumpMemspaceName = "SYSMEM_FW";
+	psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].psMemFuncs = &gsPhysHeapFuncs;
+	psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].hPrivData = NULL;
+	psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].ui32UsageFlags = PHYS_HEAP_USAGE_FW_SHARED;
+	psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE;
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+	{
+		/*
+		 * In a static memory VZ setup, the size and base addresses of
+		 * all Host and Guest drivers' Firmware heaps are laid out
+		 * consecutively in a physically contiguous memory range known
+		 * in advance by the Host driver.
+		 *
+		 * During the Host driver initialisation, it maps the entire range
+		 * into the Firmware's virtual address space. No other mapping
+		 * operations into the Firmware's VA space are needed after this.
+		 * Guest driver must know only the base address of the range
+		 * assigned to it during system planning stage.
+		 *
+		 * The system integrator must ensure that:
+		 * - physically contiguous RAM region used as a Firmware heap
+		 *   is not managed by any OS or Hypervisor (a carveout)
+		 * - Host driver must come online before any Guest drivers
+		 * - Host driver sets up the Firmware before Guests submits work
+		 */
+
+		sCpuBase.uiAddr = GetFwCarveoutBase(psDevConfig);
+		SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase);
+
+		psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_LMA;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sStartAddr = sCpuBase;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sCardBase = sDeviceBase;
+	}
+#else
+	/* Dynamic Firmware heap allocation */
+	if (PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		/*
+		 * Guest drivers must provide a physically contiguous memory
+		 * range to the Host via a PVZ call to have it mapped into
+		 * the Firmware's address space. Guest drivers use the OS
+		 * kernel's DMA/CMA allocator to obtain a DMA buffer to be
+		 * used as a firmware heap. This memory will be managed
+		 * internally by the Guest driver after the heap is created.
+		 */
+		DMA_ALLOC *psDmaAlloc = OSAllocZMem(sizeof(DMA_ALLOC));
+
+		eError = (psDmaAlloc == NULL) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_OK;
+		if (eError == PVRSRV_OK)
+		{
+			psDmaAlloc->pvOSDevice = psDevConfig->pvOSDevice;
+			psDmaAlloc->ui64Size = RGX_FIRMWARE_RAW_HEAP_SIZE;
+
+			eError = SysDmaAllocMem(psDmaAlloc);
+			if (eError == PVRSRV_OK)
+			{
+				eError = SysDmaRegisterForIoRemapping(psDmaAlloc);
+
+				if (eError == PVRSRV_OK)
+				{
+					sCpuBase.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+					SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase);
+
+					psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_DMA;
+					psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].hPrivData = psDmaAlloc;
+					psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sStartAddr = sCpuBase;
+					psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].sCardBase = sDeviceBase;
+				}
+				else
+				{
+					/* Release in reverse order of acquisition: free the
+					 * DMA buffer first, then the bookkeeping structure.
+					 * (The previous version freed psDmaAlloc and then
+					 * passed the dangling pointer to SysDmaFreeMem.) */
+					SysDmaFreeMem(psDmaAlloc);
+					OSFreeMem(psDmaAlloc);
+				}
+			}
+			else
+			{
+				OSFreeMem(psDmaAlloc);
+			}
+		}
+	}
+	else
+	{
+		/*
+		 * The Host or Native driver uses memory managed by
+		 * the kernel on a page granularity and creates on-demand
+		 * mappings into the Firmware's address space.
+		 */
+		psPhysHeapConfig[PHYS_HEAP_IDX_FIRMWARE].eType = PHYS_HEAP_TYPE_UMA;
+	}
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
+
+#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) && defined(RGX_PREMAP_FW_HEAPS)
+	if (!PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		sCpuBase.uiAddr = GetFwPageTableCarveoutBase(psDevConfig);
+		SysCpuPAToDevPA(NULL, 1, &sDeviceBase, &sCpuBase);
+
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].pszPDumpMemspaceName = "SYSMEM_FW_PT";
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].psMemFuncs = &gsPhysHeapFuncs;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].hPrivData = NULL;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].ui32UsageFlags = PHYS_HEAP_USAGE_FW_PREMAP_PT;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].eType = PHYS_HEAP_TYPE_LMA;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].uiSize = RGX_FIRMWARE_MAX_PAGETABLE_SIZE;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].sStartAddr = sCpuBase;
+		psPhysHeapConfig[PHYS_HEAP_IDX_FW_PT].sCardBase = sDeviceBase;
+	}
+#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) && defined(RGX_PREMAP_FW_HEAPS) */
+
+	/* Device's physical heaps */
+	psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL;
+	psDevConfig->ui32PhysHeapCount = PHYS_HEAP_IDX_COUNT;
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+	return eError;
+}
+
+/* Tear down resources created by PhysHeapCfgCreate.
+ *
+ * Fix over the previous version: the Guest firmware-heap DMA buffer is
+ * only allocated in the DYNAMIC heap configuration (the #else branch of
+ * RGX_VZ_STATIC_CARVEOUT_FW_HEAPS in PhysHeapCfgCreate), but the old
+ * guard here was "#if defined(...)", so in the static-carveout build it
+ * would have passed a NULL hPrivData to the DMA teardown functions and
+ * never freed the buffer in the dynamic build. The guard is inverted to
+ * match the create path, with a NULL check for extra safety. */
+static void PhysHeapCfgDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	SYS_DATA *psSysData = (SYS_DATA*)psDevConfig->hSysData;
+
+	PVR_ASSERT(psSysData);
+#endif
+
+#if !defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
+	if (PVRSRV_VZ_MODE_IS(GUEST))
+	{
+		DMA_ALLOC *psDmaAlloc = psDevConfig->pasPhysHeaps[PHYS_HEAP_IDX_FIRMWARE].hPrivData;
+
+		if (psDmaAlloc != NULL)
+		{
+			SysDmaDeregisterForIoRemapping(psDmaAlloc);
+			SysDmaFreeMem(psDmaAlloc);
+			OSFreeMem(psDmaAlloc);
+		}
+	}
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+	DestroyMPUWatchdogThread(psSysData->hValidationData);
+#endif
+}
+
+/* System entry point: create and fully populate the device configuration
+ * for the given OS device. On success *ppsDevConfig receives the new
+ * config; on failure nothing is published and all memory is released.
+ *
+ * Fix over the previous version: when PhysHeapCfgCreate failed, the
+ * config was freed but the DANGLING pointer was still stored into
+ * *ppsDevConfig. The output is now only written on success. */
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+	PVRSRV_DEVICE_CONFIG *psDevConfig;
+	PVRSRV_ERROR eError;
+
+	eError = DeviceConfigCreate(pvOSDevice, &psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	eError = PhysHeapCfgCreate(psDevConfig);
+	if (eError != PVRSRV_OK)
+	{
+		OSFreeMem(psDevConfig);
+		return eError;
+	}
+
+	*ppsDevConfig = psDevConfig;
+
+	return PVRSRV_OK;
+}
+
+/* System exit point: release the heap resources and the device config
+ * created by SysDevInit (single allocation, see DeviceConfigCreate). */
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PhysHeapCfgDestroy(psDevConfig);
+
+ OSFreeMem(psDevConfig);
+}
+
+/* Hook for contributing system-specific information to driver debug
+ * dumps. Intentionally empty for this template system. */
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ /* print here any system information useful for debug dumps */
+
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (sysconfig.c)
+******************************************************************************/
--- /dev/null
+/*************************************************************************/ /*!
+@File sysconfig.h
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+
+#if !defined(__SYSCCONFIG_H__)
+#define __SYSCCONFIG_H__
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS 100
+
+#define DEFAULT_CLOCK_RATE IMG_UINT64_C(600000000)
+
+/* fixed IPA Base of the memory carveout reserved for the GPU Firmware Heaps */
+#define FW_CARVEOUT_IPA_BASE IMG_UINT64_C(0x7E000000)
+
+/* fixed IPA Base of the memory carveout reserved for the Firmware's Page Tables */
+#define FW_PT_CARVEOUT_IPA_BASE IMG_UINT64_C(0x8F000000)
+
+/* mock SoC registers */
+#define SOC_REGBANK_BASE IMG_UINT64_C(0xF0000000)
+#define SOC_REGBANK_SIZE IMG_UINT32_C(0x10000)
+#define POW_DOMAIN_ENABLE_REG IMG_UINT32_C(0xA000)
+#define POW_DOMAIN_DISABLE_REG IMG_UINT32_C(0xA008)
+#define POW_DOMAIN_STATUS_REG IMG_UINT32_C(0xA010)
+
+#define POW_DOMAIN_GPU IMG_UINT32_C(0x1)
+
+#define MPU_EVENT_STATUS_REG IMG_UINT32_C(0xB000)
+#define MPU_EVENT_OSID_REG IMG_UINT32_C(0xB008)
+#define MPU_EVENT_ADDRESS_REG IMG_UINT32_C(0xB010)
+#define MPU_EVENT_DIRECTION_REG IMG_UINT32_C(0xB018)
+#define MPU_EVENT_CLEAR_REG IMG_UINT32_C(0xB020)
+
+#define MPU_GPU_BUS_REQUESTER IMG_UINT32_C(1)
+#define MPU_WRITE_ACCESS IMG_UINT32_C(1)
+
+#define MPU_PROTECTED_RANGE0_START_REG IMG_UINT32_C(0xC000)
+#define MPU_PROTECTED_RANGE1_START_REG IMG_UINT32_C(0xC008)
+#define MPU_PROTECTED_RANGE2_START_REG IMG_UINT32_C(0xC010)
+#define MPU_PROTECTED_RANGE3_START_REG IMG_UINT32_C(0xC018)
+#define MPU_PROTECTED_RANGE4_START_REG IMG_UINT32_C(0xC020)
+#define MPU_PROTECTED_RANGE5_START_REG IMG_UINT32_C(0xC028)
+#define MPU_PROTECTED_RANGE6_START_REG IMG_UINT32_C(0xC030)
+#define MPU_PROTECTED_RANGE7_START_REG IMG_UINT32_C(0xC038)
+#define MPU_PROTECTED_RANGE8_START_REG IMG_UINT32_C(0xC040)
+#define MPU_PROTECTED_RANGE9_START_REG IMG_UINT32_C(0xC048)
+#define MPU_PROTECTED_RANGE10_START_REG IMG_UINT32_C(0xC050)
+#define MPU_PROTECTED_RANGE11_START_REG IMG_UINT32_C(0xC058)
+#define MPU_PROTECTED_RANGE12_START_REG IMG_UINT32_C(0xC060)
+#define MPU_PROTECTED_RANGE13_START_REG IMG_UINT32_C(0xC068)
+#define MPU_PROTECTED_RANGE14_START_REG IMG_UINT32_C(0xC070)
+#define MPU_PROTECTED_RANGE15_START_REG IMG_UINT32_C(0xC078)
+
+#define MPU_PROTECTED_RANGE0_END_REG IMG_UINT32_C(0xC100)
+#define MPU_PROTECTED_RANGE1_END_REG IMG_UINT32_C(0xC108)
+#define MPU_PROTECTED_RANGE2_END_REG IMG_UINT32_C(0xC110)
+#define MPU_PROTECTED_RANGE3_END_REG IMG_UINT32_C(0xC118)
+#define MPU_PROTECTED_RANGE4_END_REG IMG_UINT32_C(0xC120)
+#define MPU_PROTECTED_RANGE5_END_REG IMG_UINT32_C(0xC128)
+#define MPU_PROTECTED_RANGE6_END_REG IMG_UINT32_C(0xC130)
+#define MPU_PROTECTED_RANGE7_END_REG IMG_UINT32_C(0xC138)
+#define MPU_PROTECTED_RANGE8_END_REG IMG_UINT32_C(0xC140)
+#define MPU_PROTECTED_RANGE9_END_REG IMG_UINT32_C(0xC148)
+#define MPU_PROTECTED_RANGE10_END_REG IMG_UINT32_C(0xC150)
+#define MPU_PROTECTED_RANGE11_END_REG IMG_UINT32_C(0xC158)
+#define MPU_PROTECTED_RANGE12_END_REG IMG_UINT32_C(0xC160)
+#define MPU_PROTECTED_RANGE13_END_REG IMG_UINT32_C(0xC168)
+#define MPU_PROTECTED_RANGE14_END_REG IMG_UINT32_C(0xC170)
+#define MPU_PROTECTED_RANGE15_END_REG IMG_UINT32_C(0xC178)
+
+
+#define MPU_PROTECTED_RANGE0_OSID_REG IMG_UINT32_C(0xC200)
+#define MPU_PROTECTED_RANGE1_OSID_REG IMG_UINT32_C(0xC208)
+#define MPU_PROTECTED_RANGE2_OSID_REG IMG_UINT32_C(0xC210)
+#define MPU_PROTECTED_RANGE3_OSID_REG IMG_UINT32_C(0xC218)
+#define MPU_PROTECTED_RANGE4_OSID_REG IMG_UINT32_C(0xC220)
+#define MPU_PROTECTED_RANGE5_OSID_REG IMG_UINT32_C(0xC228)
+#define MPU_PROTECTED_RANGE6_OSID_REG IMG_UINT32_C(0xC230)
+#define MPU_PROTECTED_RANGE7_OSID_REG IMG_UINT32_C(0xC238)
+#define MPU_PROTECTED_RANGE8_OSID_REG IMG_UINT32_C(0xC240)
+#define MPU_PROTECTED_RANGE9_OSID_REG IMG_UINT32_C(0xC248)
+#define MPU_PROTECTED_RANGE10_OSID_REG IMG_UINT32_C(0xC250)
+#define MPU_PROTECTED_RANGE11_OSID_REG IMG_UINT32_C(0xC258)
+#define MPU_PROTECTED_RANGE12_OSID_REG IMG_UINT32_C(0xC260)
+#define MPU_PROTECTED_RANGE13_OSID_REG IMG_UINT32_C(0xC268)
+#define MPU_PROTECTED_RANGE14_OSID_REG IMG_UINT32_C(0xC270)
+#define MPU_PROTECTED_RANGE15_OSID_REG IMG_UINT32_C(0xC278)
+
+#define MPU_PROTECTION_ENABLE_REG IMG_UINT32_C(0xC300)
+
+
+/*****************************************************************************
+ * system specific data structures
+ *****************************************************************************/
+
+#endif /* __SYSCCONFIG_H__ */
--- /dev/null
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*! System specific poll/timeout details */
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+
+#if defined(__linux__)
+#define SYS_RGX_DEV_NAME "powervr-vz-example"
+#endif
+
+#endif /* !defined(__SYSINFO_H__) */
--- /dev/null
+/*************************************************************************/ /*!
+@File vz_validation.c
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "sysconfig.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "vz_validation.h"
+#include "virt_validation_defs.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+typedef struct _VALIDATION_CTRL_
+{
+ IMG_BOOL bUnload;
+ IMG_HANDLE hMPUWatchdogThread;
+ IMG_HANDLE hSystemWatchdogEvObj;
+} VALIDATION_CTRL;
+
+/* Read the SoC MPU event status register and, if the GPU bus requester has
+ * latched a fault, log the offending OSID, fault address and access
+ * direction, then clear the event so subsequent faults can be latched.
+ * Called from the MPU watchdog thread after each wait period.
+ */
+static void SysPrintAndResetFaultStatusRegister(void)
+{
+ IMG_CPU_PHYADDR sSoCRegBase = {SOC_REGBANK_BASE};
+ IMG_UINT32 ui32MPUEventStatus;
+
+ /* Map the SoC register bank uncached for the duration of this check.
+ * NOTE(review): the mapping is not NULL-checked before the register
+ * reads below — confirm OSMapPhysToLin cannot fail in this config. */
+ void* pvSocRegs = OSMapPhysToLin(sSoCRegBase,
+ SOC_REGBANK_SIZE,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+ ui32MPUEventStatus = OSReadHWReg32(pvSocRegs, MPU_EVENT_STATUS_REG);
+
+ /* Only report faults raised by the GPU bus requester. */
+ if (ui32MPUEventStatus & MPU_GPU_BUS_REQUESTER)
+ {
+ IMG_UINT32 ui32MPUEventOSID = OSReadHWReg32(pvSocRegs, MPU_EVENT_OSID_REG);
+ IMG_UINT32 ui32MPUEventAddress = OSReadHWReg32(pvSocRegs, MPU_EVENT_ADDRESS_REG);
+ IMG_UINT32 ui32MPUEventDirection = OSReadHWReg32(pvSocRegs, MPU_EVENT_DIRECTION_REG);
+
+ PVR_DPF((PVR_DBG_ERROR, "MPU fault event: GPU attempted an illegal %s access to address 0x%08X while emitting OSID %u",
+ (ui32MPUEventDirection == MPU_WRITE_ACCESS) ? "WRITE" : "READ",
+ ui32MPUEventAddress,
+ ui32MPUEventOSID));
+
+ /* Acknowledge/clear the latched event in hardware. */
+ OSWriteHWReg32(pvSocRegs, MPU_EVENT_CLEAR_REG, 1);
+ }
+
+ OSUnMapPhysToLin(pvSocRegs, SOC_REGBANK_SIZE);
+}
+
+/* Program the emulator/validation MPU so each OSID is granted access to two
+ * protected ranges (one private, one shared — see loop comment), write the
+ * per-region min/max bounds supplied by the caller, and finally enable MPU
+ * protection globally.
+ *
+ * hSysData     - unused system handle (kept for interface compatibility).
+ * aui64OSidMin - per-region, per-OSID range start addresses.
+ * aui64OSidMax - per-region, per-OSID range end addresses.
+ */
+void SysInitValidation(IMG_HANDLE hSysData,
+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS])
+{
+ IMG_CPU_PHYADDR sSoCRegBase = {SOC_REGBANK_BASE};
+ IMG_UINT32 ui32OSID, ui32Region;
+
+ /* NOTE(review): pvSocRegs is not NULL-checked before the writes below —
+ * confirm OSMapPhysToLin cannot fail in this configuration. */
+ void* pvSocRegs = OSMapPhysToLin(sSoCRegBase,
+ SOC_REGBANK_SIZE,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+ /* Row = OSID, column = region. Each OSID's row pairs its private range
+ * (0..7) with the corresponding shared range (8..15).
+ * NOTE(review): rows are sized GPUVIRT_VALIDATION_MAX_OS while the
+ * parameters use GPUVIRT_VALIDATION_NUM_OS — assumes the two are equal;
+ * confirm against virt_validation_defs.h. */
+ IMG_UINT64 aui64RegMin[GPUVIRT_VALIDATION_MAX_OS][GPUVIRT_VALIDATION_NUM_REGIONS] =
+ {
+ {MPU_PROTECTED_RANGE0_START_REG, MPU_PROTECTED_RANGE8_START_REG},
+ {MPU_PROTECTED_RANGE1_START_REG, MPU_PROTECTED_RANGE9_START_REG},
+ {MPU_PROTECTED_RANGE2_START_REG, MPU_PROTECTED_RANGE10_START_REG},
+ {MPU_PROTECTED_RANGE3_START_REG, MPU_PROTECTED_RANGE11_START_REG},
+ {MPU_PROTECTED_RANGE4_START_REG, MPU_PROTECTED_RANGE12_START_REG},
+ {MPU_PROTECTED_RANGE5_START_REG, MPU_PROTECTED_RANGE13_START_REG},
+ {MPU_PROTECTED_RANGE6_START_REG, MPU_PROTECTED_RANGE14_START_REG},
+ {MPU_PROTECTED_RANGE7_START_REG, MPU_PROTECTED_RANGE15_START_REG}
+ };
+
+ /* End-of-range registers, laid out identically to aui64RegMin. */
+ IMG_UINT64 aui64RegMax[GPUVIRT_VALIDATION_MAX_OS][GPUVIRT_VALIDATION_NUM_REGIONS] =
+ {
+ {MPU_PROTECTED_RANGE0_END_REG, MPU_PROTECTED_RANGE8_END_REG},
+ {MPU_PROTECTED_RANGE1_END_REG, MPU_PROTECTED_RANGE9_END_REG},
+ {MPU_PROTECTED_RANGE2_END_REG, MPU_PROTECTED_RANGE10_END_REG},
+ {MPU_PROTECTED_RANGE3_END_REG, MPU_PROTECTED_RANGE11_END_REG},
+ {MPU_PROTECTED_RANGE4_END_REG, MPU_PROTECTED_RANGE12_END_REG},
+ {MPU_PROTECTED_RANGE5_END_REG, MPU_PROTECTED_RANGE13_END_REG},
+ {MPU_PROTECTED_RANGE6_END_REG, MPU_PROTECTED_RANGE14_END_REG},
+ {MPU_PROTECTED_RANGE7_END_REG, MPU_PROTECTED_RANGE15_END_REG}
+ };
+
+ /* OSID-access registers for all 16 ranges; indexed by range number. */
+ IMG_UINT64 aui64RangeOSIDAccessReg[] =
+ {
+ MPU_PROTECTED_RANGE0_OSID_REG,
+ MPU_PROTECTED_RANGE1_OSID_REG,
+ MPU_PROTECTED_RANGE2_OSID_REG,
+ MPU_PROTECTED_RANGE3_OSID_REG,
+ MPU_PROTECTED_RANGE4_OSID_REG,
+ MPU_PROTECTED_RANGE5_OSID_REG,
+ MPU_PROTECTED_RANGE6_OSID_REG,
+ MPU_PROTECTED_RANGE7_OSID_REG,
+ MPU_PROTECTED_RANGE8_OSID_REG,
+ MPU_PROTECTED_RANGE9_OSID_REG,
+ MPU_PROTECTED_RANGE10_OSID_REG,
+ MPU_PROTECTED_RANGE11_OSID_REG,
+ MPU_PROTECTED_RANGE12_OSID_REG,
+ MPU_PROTECTED_RANGE13_OSID_REG,
+ MPU_PROTECTED_RANGE14_OSID_REG,
+ MPU_PROTECTED_RANGE15_OSID_REG
+ };
+
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ FOREACH_VALIDATION_OSID(ui32OSID)
+ {
+ /* every OSID gets access to 2 ranges: one secure range and one shared with all the other OSIDs
+ * e.g. OSID 0: secure RANGE 0 & shared RANGE 8 */
+ OSWriteHWReg64(pvSocRegs, aui64RangeOSIDAccessReg[ui32OSID], ui32OSID);
+ OSWriteHWReg64(pvSocRegs, aui64RangeOSIDAccessReg[ui32OSID+GPUVIRT_VALIDATION_MAX_OS], ui32OSID);
+
+ /* Program start/end bounds for both of this OSID's ranges. Note the
+ * index order differs: local tables are [OSID][region], the caller's
+ * arrays are [region][OSID]. */
+ for (ui32Region=0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+ {
+ OSWriteHWReg64(pvSocRegs, aui64RegMin[ui32OSID][ui32Region], aui64OSidMin[ui32Region][ui32OSID]);
+ OSWriteHWReg64(pvSocRegs, aui64RegMax[ui32OSID][ui32Region], aui64OSidMax[ui32Region][ui32OSID]);
+ }
+ }
+
+ /* All ranges configured — turn protection on. */
+ OSWriteHWReg32(pvSocRegs, MPU_PROTECTION_ENABLE_REG, 1);
+
+ OSUnMapPhysToLin(pvSocRegs, SOC_REGBANK_SIZE);
+}
+
+/* Watchdog thread body: periodically (or on event signal) polls the MPU
+ * fault status register via SysPrintAndResetFaultStatusRegister(). Exits
+ * when psValidCtrl->bUnload is set and the event object is signalled.
+ *
+ * pvData - VALIDATION_CTRL* created by CreateMPUWatchdogThread(); the
+ *          thread returns immediately if it is NULL.
+ */
+static void MPUWatchdogThread(void *pvData)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hOSEvent;
+ IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+ VALIDATION_CTRL *psValidCtrl = (VALIDATION_CTRL*)pvData;
+
+ if (psValidCtrl == NULL)
+ {
+ return;
+ }
+
+ /* Open an event on the system watchdog event object so we can listen on it
+ and abort the system watchdog thread. */
+ eError = OSEventObjectOpen(psValidCtrl->hSystemWatchdogEvObj, &hOSEvent);
+ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
+
+ while (!psValidCtrl->bUnload)
+ {
+ /* Wait up to ui32Timeout ms; a signal means either shutdown or a
+ * power state change, a timeout just means "poll again". */
+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+ if (eError == PVRSRV_OK)
+ {
+ if (psValidCtrl->bUnload)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Shutdown event received.", __func__));
+ break;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Power state change event received.", __func__));
+ }
+ }
+ else if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ /* If timeout do nothing otherwise print warning message. */
+ PVR_DPF((PVR_DBG_ERROR, "%s: "
+ "Error (%d) when waiting for event!", __func__, eError));
+ }
+
+ /* Check (and clear) any MPU fault on every wakeup, timeout or not. */
+ SysPrintAndResetFaultStatusRegister();
+ }
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+/* Allocate the validation control block, create its watchdog event object
+ * and spawn the MPU fault-watchdog thread.
+ *
+ * phValidationData - out: opaque handle (VALIDATION_CTRL*) to pass to
+ *                    DestroyMPUWatchdogThread() on teardown.
+ * Returns PVRSRV_OK on success; on failure all partially-created resources
+ * are released via the goto-cleanup chain and an error code is returned.
+ */
+PVRSRV_ERROR CreateMPUWatchdogThread(IMG_HANDLE *phValidationData)
+{
+ VALIDATION_CTRL *psValidCtrl;
+ PVRSRV_ERROR eError;
+
+ psValidCtrl = OSAllocZMem(sizeof(*psValidCtrl));
+ PVR_LOG_RETURN_IF_NOMEM(psValidCtrl, "psValidCtrl");
+
+ /* Create the SysWatchdog Event Object */
+ eError = OSEventObjectCreate("PVRSRV_SYSWDG_EVENTOBJECT", &psValidCtrl->hSystemWatchdogEvObj);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_eventobject);
+
+ /* Create a thread which is used to detect fatal errors */
+ eError = OSThreadCreatePriority(&psValidCtrl->hMPUWatchdogThread,
+ "emu_check_fault_wdg",
+ MPUWatchdogThread,
+ NULL,
+ IMG_TRUE,
+ psValidCtrl,
+ 0);
+ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority", error_threadcreate);
+
+ *phValidationData = (IMG_HANDLE)psValidCtrl;
+
+ return PVRSRV_OK;
+
+error_threadcreate:
+ OSEventObjectDestroy(psValidCtrl->hSystemWatchdogEvObj);
+error_eventobject:
+ OSFreeMem(psValidCtrl);
+
+ return eError;
+}
+
+/* Tear down the MPU watchdog: flag the thread for unload, wake it via the
+ * event object, destroy the thread and event object, then free the control
+ * block. Safe to call with a NULL handle.
+ *
+ * hValidationData - handle returned by CreateMPUWatchdogThread().
+ */
+void DestroyMPUWatchdogThread(IMG_HANDLE hValidationData)
+{
+ VALIDATION_CTRL *psValidCtrl = (VALIDATION_CTRL*)hValidationData;
+
+ if (psValidCtrl == NULL)
+ {
+ return;
+ }
+
+ /* Signal the watchdog loop to exit before tearing anything down. */
+ psValidCtrl->bUnload = IMG_TRUE;
+
+ if (psValidCtrl->hMPUWatchdogThread)
+ {
+ if (psValidCtrl->hSystemWatchdogEvObj)
+ {
+ OSEventObjectSignal(psValidCtrl->hSystemWatchdogEvObj);
+ }
+ /* NOTE(review): this loop ignores OSThreadDestroy's return value,
+ * NULLs the handle on the first pass and has no break — if
+ * LOOP_UNTIL_TIMEOUT iterates more than once, later iterations
+ * would call OSThreadDestroy(NULL). Confirm against the usual DDK
+ * pattern (retry on error, break on PVRSRV_OK). */
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ OSThreadDestroy(psValidCtrl->hMPUWatchdogThread);
+ psValidCtrl->hMPUWatchdogThread = NULL;
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ }
+
+ if (psValidCtrl->hSystemWatchdogEvObj)
+ {
+ OSEventObjectDestroy(psValidCtrl->hSystemWatchdogEvObj);
+ psValidCtrl->hSystemWatchdogEvObj = NULL;
+ }
+
+ OSFreeMem(psValidCtrl);
+}
+
+#endif
--- /dev/null
+/*************************************************************************/ /*!
+@File vz_validation.h
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__VZ_VALIDATION_H__)
+#define __VZ_VALIDATION_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+PVRSRV_ERROR CreateMPUWatchdogThread(IMG_HANDLE *phValidationData);
+void DestroyMPUWatchdogThread(IMG_HANDLE hValidationData);
+
+void SysInitValidation(IMG_HANDLE hSysData,
+ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+#endif
+
+#endif /* __VZ_VALIDATION_H__ */
#define __KERNEL_COMPATIBILITY_H__
#include <linux/version.h>
+#include <linux/compiler.h>
/*
* Stop supporting an old kernel? Remove the top block.
* New incompatible kernel? Append a new block at the bottom.
*
- * Please write you version test as `VERSION < X.Y`, and use the earliest
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
* possible version :)
+ *
+ * If including this header file in other files, this should always be the
+ * last file included, as it can affect definitions/declarations in files
+ * included after it.
*/
/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0))
+#define MODULE_IMPORT_NS(ns)
+#endif
+
/*
* Before v5.8, the "struct mm" has a semaphore named "mmap_sem" which is
* renamed to "mmap_lock" in v5.8. Moreover, new APIs are provided to
#define uaccess_disable_privileged() uaccess_disable()
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
+#define pde_data PDE_DATA
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0))
+#define iosys_map dma_buf_map
+#define iosys_map_set_vaddr_iomem dma_buf_map_set_vaddr_iomem
+#define iosys_map_clear dma_buf_map_clear
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
+
+#define register_shrinker(shrinker, name) \
+ register_shrinker(shrinker)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) */
+
+#if defined(__GNUC__)
+#define GCC_VERSION_AT_LEAST(major, minor) \
+ (__GNUC__ > (major) || \
+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+#else
+#define GCC_VERSION_AT_LEAST(major, minor) 0
+#endif
+
+#if defined(__clang__)
+#define CLANG_VERSION_AT_LEAST(major) \
+ (__clang_major__ >= (major))
+#else
+#define CLANG_VERSION_AT_LEAST(major) 0
+#endif
+
+#if !defined(__fallthrough)
+ #if GCC_VERSION_AT_LEAST(7, 0) || CLANG_VERSION_AT_LEAST(10)
+ #define __fallthrough __attribute__((__fallthrough__))
+ #else
+ #define __fallthrough
+ #endif
+#endif
+
#endif /* __KERNEL_COMPATIBILITY_H__ */
}
#else
extern void *riscv_vmap(struct page **pages, unsigned int count,
- unsigned long flags, pgprot_t prot);
+ unsigned long flags, pgprot_t prot, int cached);
static inline void *pvr_vmap(struct page **pages,
unsigned int count,
__maybe_unused unsigned long flags,
pgprot_t prot)
{
- return riscv_vmap(pages, count, flags, prot);
+ return riscv_vmap(pages, count, flags, prot, 0);
}
+static inline void *pvr_vmap_cached(struct page **pages,
+ unsigned int count,
+ __maybe_unused unsigned long flags,
+ pgprot_t prot)
+{
+ return riscv_vmap(pages, count, flags, prot, 1);
+}
static inline void pvr_vunmap(void *pages,
__maybe_unused unsigned int count,
#define PVRVERSION_H
#define PVRVERSION_MAJ 1U
-#define PVRVERSION_MIN 17U
+#define PVRVERSION_MIN 19U
#define PVRVERSION_FAMILY "rogueddk"
-#define PVRVERSION_BRANCHNAME "1.17"
-#define PVRVERSION_BUILD 6210866
+#define PVRVERSION_BRANCHNAME "1.19"
+#define PVRVERSION_BUILD 6345021
#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS"
-#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.17@6210866"
-#define PVRVERSION_STRING_SHORT "1.17@6210866"
+#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.19@6345021"
+#define PVRVERSION_STRING_SHORT "1.19@6345021"
#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
-#define PVRVERSION_BUILD_HI 621
-#define PVRVERSION_BUILD_LO 866
-#define PVRVERSION_STRING_NUMERIC "1.17.621.866"
+#define PVRVERSION_BUILD_HI 634
+#define PVRVERSION_BUILD_LO 5021
+#define PVRVERSION_STRING_NUMERIC "1.19.634.5021"
#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U))
#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU)
#define SYSPORT_MEM_PFN_OFFSET (0x400000000 >> PAGE_SHIFT)
#define __mk_pte(page, prot) pfn_pte(page_to_pfn(page) + SYSPORT_MEM_PFN_OFFSET, prot)
+#define __mk_pte_cached(page, prot) pfn_pte(page_to_pfn(page), prot)
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ pgtbl_mod_mask *mask, int cached)
{
pte_t *pte;
if (WARN_ON(!page))
return -ENOMEM;
/* Special processing for starfive RISC-V, pfn add an offset of 0x400000*/
- set_pte_at(&init_mm, addr, pte, __mk_pte(page, prot));
+ if (cached)
+ set_pte_at(&init_mm, addr, pte, __mk_pte_cached(page, prot));
+ else
+ set_pte_at(&init_mm, addr, pte, __mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
*mask |= PGTBL_PTE_MODIFIED;
static int vmap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ pgtbl_mod_mask *mask, int cached)
{
pmd_t *pmd;
unsigned long next;
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
+ if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask,
+ cached))
return -ENOMEM;
} while (pmd++, addr = next, addr != end);
return 0;
static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ pgtbl_mod_mask *mask, int cached)
{
pud_t *pud;
unsigned long next;
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
+ if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask,
+ cached))
return -ENOMEM;
} while (pud++, addr = next, addr != end);
return 0;
static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
- pgtbl_mod_mask *mask)
+ pgtbl_mod_mask *mask, int cached)
{
p4d_t *p4d;
unsigned long next;
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
+ if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask,
+ cached))
return -ENOMEM;
} while (p4d++, addr = next, addr != end);
return 0;
}
static int __map_kernel_range_noflush(unsigned long addr, unsigned long size,
- pgprot_t prot, struct page **pages)
+ pgprot_t prot, struct page **pages, int cached)
{
unsigned long start = addr;
unsigned long end = addr + size;
next = pgd_addr_end(addr, end);
if (pgd_bad(*pgd))
mask |= PGTBL_PGD_MODIFIED;
- err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
+ err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask,
+ cached);
if (err)
return err;
} while (pgd++, addr = next, addr != end);
}
static int __map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
- struct page **pages)
+ struct page **pages, int cached)
{
int ret;
- ret = __map_kernel_range_noflush(start, size, prot, pages);
+ ret = __map_kernel_range_noflush(start, size, prot, pages, cached);
flush_cache_vmap(start, start + size);
return ret;
}
void *riscv_vmap(struct page **pages, unsigned int count,
- unsigned long flags, pgprot_t prot)
+ unsigned long flags, pgprot_t prot, int cached)
{
struct vm_struct *area;
unsigned long size; /* In bytes */
return NULL;
if (__map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
- pages) < 0) {
+ pages, cached) < 0) {
vunmap(area->addr);
return NULL;
}
#ifndef RISCV_VMAP_H
#define RISCV_VMAP_H
-void *riscv_vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot);
+void *riscv_vmap(struct page **pages, unsigned int count, unsigned long flags,
+ pgprot_t prot, int cached);
void test_riscv_vmap(void);
#endif