Part-of: <https://gitlab.freedesktop.org/gstreamer/gstreamer/-/merge_requests/8158>
<member name="ipc_mem_lazy_enable_peer_access" value="1" c:identifier="CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS">
</member>
</enumeration>
+ <enumeration name="limit" c:type="CUlimit">
+ <source-position filename="../subprojects/gst-plugins-bad/gst-libs/gst/cuda/stub/cuda.h"/>
+ <member name="stack_size" value="0" c:identifier="CU_LIMIT_STACK_SIZE">
+ </member>
+ <member name="printf_fifo_size" value="1" c:identifier="CU_LIMIT_PRINTF_FIFO_SIZE">
+ </member>
+ <member name="malloc_heap_size" value="2" c:identifier="CU_LIMIT_MALLOC_HEAP_SIZE">
+ </member>
+ <member name="dev_runtime_sync_depth" value="3" c:identifier="CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH">
+ </member>
+ <member name="dev_runtime_pending_launch_count" value="4" c:identifier="CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT">
+ </member>
+ <member name="max_l2_fetch_granularity" value="5" c:identifier="CU_LIMIT_MAX_L2_FETCH_GRANULARITY">
+ </member>
+ <member name="persisting_l2_cache_size" value="6" c:identifier="CU_LIMIT_PERSISTING_L2_CACHE_SIZE">
+ </member>
+ <member name="shmem_size" value="7" c:identifier="CU_LIMIT_SHMEM_SIZE">
+ </member>
+ <member name="cig_enabled" value="8" c:identifier="CU_LIMIT_CIG_ENABLED">
+ </member>
+ <member name="cig_shmem_fallback_enabled" value="9" c:identifier="CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED">
+ </member>
+ </enumeration>
<record name="memAccessDesc" c:type="CUmemAccessDesc">
<source-position filename="../subprojects/gst-plugins-bad/gst-libs/gst/cuda/stub/cuda.h"/>
<field name="location" writable="1">
GST_CUDA_API
CUresult CUDAAPI CuCtxSynchronize (void);
+/* Queries resource limit @limit of the current CUDA context into @plimit.
+ * Thin forwarding wrapper around the driver API cuCtxGetLimit(), resolved
+ * at runtime via the gst_cuda vtable. */
+GST_CUDA_API
+CUresult CUDAAPI CuCtxGetLimit (size_t *plimit, CUlimit limit);
+
+/* Sets resource limit @limit of the current CUDA context to @value.
+ * Thin forwarding wrapper around the driver API cuCtxSetLimit(). */
+GST_CUDA_API
+CUresult CUDAAPI CuCtxSetLimit (CUlimit limit, size_t value);
+
GST_CUDA_API
CUresult CUDAAPI CuCtxEnablePeerAccess (CUcontext peerContext,
unsigned int Flags);
CUresult (CUDAAPI * CuCtxPopCurrent) (CUcontext * pctx);
CUresult (CUDAAPI * CuCtxPushCurrent) (CUcontext ctx);
CUresult (CUDAAPI * CuCtxSynchronize) (void);
+ CUresult (CUDAAPI * CuCtxGetLimit) (size_t * plimit, CUlimit limit);
+ CUresult (CUDAAPI * CuCtxSetLimit) (CUlimit limit, size_t value);
CUresult (CUDAAPI * CuCtxEnablePeerAccess) (CUcontext peerContext,
unsigned int Flags);
LOAD_SYMBOL (cuCtxPushCurrent, CuCtxPushCurrent);
LOAD_SYMBOL (cuCtxEnablePeerAccess, CuCtxEnablePeerAccess);
LOAD_SYMBOL (cuCtxDisablePeerAccess, CuCtxDisablePeerAccess);
+ LOAD_SYMBOL (cuCtxGetLimit, CuCtxGetLimit);
+ LOAD_SYMBOL (cuCtxSetLimit, CuCtxSetLimit);
LOAD_SYMBOL (cuGraphicsMapResources, CuGraphicsMapResources);
LOAD_SYMBOL (cuGraphicsUnmapResources, CuGraphicsUnmapResources);
return gst_cuda_vtable.CuCtxDisablePeerAccess (peerContext);
}
+/* Forwards to the dynamically loaded cuCtxGetLimit() driver symbol; stores
+ * the queried limit value in @plimit and returns the driver's CUresult.
+ * Asserts that the symbol was resolved by the loader before use. */
+CUresult CUDAAPI
+CuCtxGetLimit (size_t *plimit, CUlimit limit)
+{
+  g_assert (gst_cuda_vtable.CuCtxGetLimit != nullptr);
+
+  return gst_cuda_vtable.CuCtxGetLimit (plimit, limit);
+}
+
+/* Forwards to the dynamically loaded cuCtxSetLimit() driver symbol, setting
+ * context resource limit @limit to @value; returns the driver's CUresult.
+ * Asserts that the symbol was resolved by the loader before use. */
+CUresult CUDAAPI
+CuCtxSetLimit (CUlimit limit, size_t value)
+{
+  g_assert (gst_cuda_vtable.CuCtxSetLimit != nullptr);
+
+  return gst_cuda_vtable.CuCtxSetLimit (limit, value);
+}
+
CUresult CUDAAPI
CuGraphicsMapResources (unsigned int count, CUgraphicsResource * resources,
CUstream hStream)
CU_EVENT_INTERPROCESS = 0x4,
} CUevent_flags;
+/* Stub mirror of the CUDA driver API CUlimit enumeration; numeric values
+ * must match the driver's cuda.h exactly, since they are passed straight
+ * through to cuCtxGetLimit()/cuCtxSetLimit(). */
+typedef enum
+{
+  CU_LIMIT_STACK_SIZE = 0x0,
+  CU_LIMIT_PRINTF_FIFO_SIZE = 0x1,
+  CU_LIMIT_MALLOC_HEAP_SIZE = 0x2,
+  CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 0x3,
+  CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 0x4,
+  CU_LIMIT_MAX_L2_FETCH_GRANULARITY = 0x5,
+  CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 0x6,
+  /* NOTE(review): the following three were added in newer CUDA toolkits
+   * (CIG = "CUDA in Graphics") — values taken from driver cuda.h. */
+  CU_LIMIT_SHMEM_SIZE = 0x7,
+  CU_LIMIT_CIG_ENABLED = 0x8,
+  CU_LIMIT_CIG_SHMEM_FALLBACK_ENABLED = 0x9,
+} CUlimit;
+
typedef struct
{
gsize srcXInBytes;