/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * \file amdgpu.h
 *
 * Declare public libdrm_amdgpu API
 *
 * This file defines the API exposed by the libdrm_amdgpu library.
 * Users who want to use libdrm_amdgpu functionality must include
 * this file.
 */

#ifndef _AMDGPU_H_
#define _AMDGPU_H_

#include <stdint.h>
#include <stdbool.h>

struct drm_amdgpu_info_hw_ip;

/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Maximum number of Command Buffers (IBs) that can be sent to a single
 * hardware IP in one submission, to accommodate CE/DE requirements.
 *
 * \sa amdgpu_cs_ib_info
 */
#define AMDGPU_CS_MAX_IBS_PER_SUBMIT		4

/**
 * Special timeout value meaning that the timeout is infinite.
 */
#define AMDGPU_TIMEOUT_INFINITE			0xffffffffffffffffull

/**
 * The special flag to mark that this IB will be re-used by the client and
 * should not be automatically returned to the free pool by libdrm_amdgpu
 * when the submission is completed.
 *
 * \sa amdgpu_cs_ib_info
 */
#define AMDGPU_CS_REUSE_IB			0x2

/*--------------------------------------------------------------------------*/
/* ----------------------------- Enums ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Enum describing possible handle types
 *
 * \sa amdgpu_bo_import, amdgpu_bo_export
 */
enum amdgpu_bo_handle_type {
	/** GEM flink name (needs DRM authentication, used by DRI2) */
	amdgpu_bo_handle_type_gem_flink_name = 0,

	/** KMS handle which is used by all driver ioctls */
	amdgpu_bo_handle_type_kms = 1,

	/** DMA-buf fd handle */
	amdgpu_bo_handle_type_dma_buf_fd = 2
};

/**
 * For performance reasons and to simplify the logic, libdrm_amdgpu will
 * handle IBs of only a few pre-defined sizes.
 *
 * \sa amdgpu_cs_alloc_ib()
 */
enum amdgpu_cs_ib_size {
	amdgpu_cs_ib_size_4K	= 0,
	amdgpu_cs_ib_size_16K	= 1,
	amdgpu_cs_ib_size_32K	= 2,
	amdgpu_cs_ib_size_64K	= 3,
	amdgpu_cs_ib_size_128K	= 4
};

/** The number of different IB sizes */
#define AMDGPU_CS_IB_SIZE_NUM			5

/*--------------------------------------------------------------------------*/
/* -------------------------- Datatypes ----------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Define opaque pointer to context associated with fd.
 * This context will be returned as the result of the
 * "initialize" function and should be passed as the first
 * parameter to any API call.
 */
typedef struct amdgpu_device *amdgpu_device_handle;

/**
 * Define GPU Context type as pointer to opaque structure.
 * An example of a GPU Context is the "rendering" context associated
 * with an OpenGL context (glCreateContext).
 */
typedef struct amdgpu_context *amdgpu_context_handle;

/**
 * Define handle for amdgpu resources: buffer, GDS, etc.
 */
typedef struct amdgpu_bo *amdgpu_bo_handle;

/**
 * Define handle for list of BOs
 */
typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;

/**
 * Define handle to be used when dealing with command
 * buffers (a.k.a. IBs)
 */
typedef struct amdgpu_ib *amdgpu_ib_handle;

/*--------------------------------------------------------------------------*/
/* -------------------------- Structures ---------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Structure describing memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 */
struct amdgpu_bo_alloc_request {
	/** Requested allocation size. It must be aligned correctly. */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for the physical backing storage (e.g. for displayable surfaces).
	 * If 0 there is no special alignment requirement.
	 */
	uint64_t phys_alignment;

	/**
	 * UMD should specify where to allocate memory and how it
	 * will be accessed by the CPU.
	 */
	uint32_t preferred_heap;

	/** Additional flags passed on allocation */
	uint64_t flags;
};

/**
 * Structure describing the result of a memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 */
struct amdgpu_bo_alloc_result {
	/** Assigned virtual MC Base Address */
	uint64_t virtual_mc_base_address;

	/** Handle of allocated memory to be used by the given process only. */
	amdgpu_bo_handle buf_handle;
};

/**
 * Special UMD specific information associated with buffer.
 *
 * It may be needed to pass some buffer characteristics as part
 * of buffer sharing. Such information is defined by the UMD and is
 * opaque to libdrm_amdgpu as well as to the kernel driver.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
 */
struct amdgpu_bo_metadata {
	/** Special flag associated with surface */
	uint64_t flags;

	/**
	 * ASIC-specific tiling information (also used by DCE).
	 * The encoding is defined by the AMDGPU_TILING_* definitions.
	 */
	uint64_t tiling_info;

	/** Size of metadata associated with the buffer, in bytes. */
	uint32_t size_metadata;

	/** UMD specific metadata. Opaque for kernel */
	uint32_t umd_metadata[64];
};

/**
 * Structure describing an allocated buffer. Clients may need
 * to query this information as part of the buffer-sharing mechanism.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
 */
struct amdgpu_bo_info {
	/** Allocated memory size */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for the physical backing storage.
	 */
	uint64_t phys_alignment;

	/**
	 * Assigned virtual MC Base Address.
	 * \note This information will be returned only if this buffer was
	 * allocated in the same process; otherwise 0 will be returned.
	 */
	uint64_t virtual_mc_base_address;

	/** Heap where the memory was allocated. */
	uint32_t preferred_heap;

	/** Additional allocation flags. */
	uint64_t alloc_flags;

	/** Metadata associated with buffer if any. */
	struct amdgpu_bo_metadata metadata;
};

/**
 * Structure with information about an "imported" buffer
 *
 * \sa amdgpu_bo_import()
 */
struct amdgpu_bo_import_result {
	/** Handle of memory/buffer to use */
	amdgpu_bo_handle buf_handle;

	/** Buffer size */
	uint64_t alloc_size;

	/** Assigned virtual MC Base Address */
	uint64_t virtual_mc_base_address;
};

/**
 * Structure to describe GDS partitioning information.
 * \note OA and GWS resources are associated with GDS partition
 *
 * \sa amdgpu_gpu_resource_query_gds_info
 */
struct amdgpu_gds_resource_info {
	uint32_t gds_gfx_partition_size;
	uint32_t compute_partition_size;
	uint32_t gds_total_size;
	uint32_t gws_per_gfx_partition;
	uint32_t gws_per_compute_partition;
	uint32_t oa_per_gfx_partition;
	uint32_t oa_per_compute_partition;
};

/**
 * Structure describing the result of a request to allocate GDS
 *
 * \sa amdgpu_gpu_resource_gds_alloc
 */
struct amdgpu_gds_alloc_info {
	/** Handle assigned to GDS allocation */
	amdgpu_bo_handle resource_handle;

	/** How much was really allocated */
	uint32_t gds_memory_size;

	/** Number of GWS resources allocated */
	uint32_t gws;

	/** Number of OA resources allocated */
	uint32_t oa;
};

/**
 * Structure describing an allocated command buffer (a.k.a. IB)
 *
 * \sa amdgpu_cs_alloc_ib()
 */
struct amdgpu_cs_ib_alloc_result {
	/** IB allocation handle */
	amdgpu_ib_handle handle;

	/** Assigned GPU VM MC Address of command buffer */
	uint64_t mc_address;

	/** Address to be used for CPU access */
	void *cpu;
};

/**
 * Structure describing an IB
 *
 * \sa amdgpu_cs_request, amdgpu_cs_submit()
 */
struct amdgpu_cs_ib_info {
	/** Special flags */
	uint64_t flags;

	/** Handle of command buffer */
	amdgpu_ib_handle ib_handle;

	/**
	 * Size of Command Buffer to be submitted.
	 *   - The size is in units of dwords (4 bytes).
	 *   - Must be less than or equal to the size of the allocated IB
	 */
	uint32_t size;
};

/**
 * Structure describing a submission request
 *
 * \note A single request may contain several IBs, e.g. the CE, CE, DE case
 *	 for gfx.
 *
 * \sa amdgpu_cs_submit()
 */
struct amdgpu_cs_request {
	/** Specify flags with additional information */
	uint64_t flags;

	/** Specify HW IP block type to which to send the IB. */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/**
	 * Specify ring index of the IP. We could have several rings
	 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
	 */
	uint32_t ring;

	/**
	 * List handle with resources used by this request.
	 */
	amdgpu_bo_list_handle resources;

	/** Number of IBs to submit in the field ibs. */
	uint32_t number_of_ibs;

	/**
	 * IBs to submit. These IBs will be submitted together as a single
	 * entity.
	 */
	struct amdgpu_cs_ib_info *ibs;
};

/**
 * Structure describing a request to check submission state using a fence
 *
 * \sa amdgpu_cs_query_fence_status()
 */
struct amdgpu_cs_query_fence {
	/** In which context IB was sent to execution */
	amdgpu_context_handle context;

	/** Timeout in nanoseconds. */
	uint64_t timeout_ns;

	/** To which HW IP type the fence belongs */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/** Ring index of the HW IP */
	uint32_t ring;

	/** Specify fence for which we need to check
	 * submission status. */
	uint64_t fence;
};

/**
 * Structure which provides information about GPU VM MC address space
 * alignment requirements
 *
 * \sa amdgpu_query_buffer_size_alignment
 */
struct amdgpu_buffer_size_alignments {
	/**
	 * Size alignment requirement for allocation in local memory
	 */
	uint64_t size_local;

	/**
	 * Size alignment requirement for allocation in remote memory
	 */
	uint64_t size_remote;
};

/**
 * Structure which provides information about a heap
 *
 * \sa amdgpu_query_heap_info()
 */
struct amdgpu_heap_info {
	/** Theoretical maximum available memory in the given heap */
	uint64_t heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	uint64_t heap_usage;

	/**
	 * Theoretical possible maximum size of buffer which
	 * could be allocated in the given heap
	 */
	uint64_t max_allocation;
};

/**
 * Describes the GPU h/w information needed for correct UMD initialization
 *
 * \sa amdgpu_query_gpu_info()
 */
struct amdgpu_gpu_info {
	/** Chip revision */
	uint32_t chip_rev;
	/** Chip external revision */
	uint32_t chip_external_rev;
	/** Max engine clock */
	uint64_t max_engine_clk;
	/** Number of shader engines */
	uint32_t num_shader_engines;
	/** Number of shader arrays per engine */
	uint32_t num_shader_arrays_per_engine;
	/** Number of available good shader pipes */
	uint32_t avail_quad_shader_pipes;
	/** Maximum number of shader pipes (including good and bad pipes) */
	uint32_t max_quad_shader_pipes;
	/** Number of parameter cache entries per shader quad pipe */
	uint32_t cache_entries_per_quad_pipe;
	/** Number of available graphics contexts */
	uint32_t num_hw_gfx_contexts;
	/** Number of render backend pipes */
	uint32_t rb_pipes;
	/** Enabled render backend pipe mask */
	uint32_t enabled_rb_pipes_mask;
	/** Frequency of GPU counter */
	uint32_t gpu_counter_freq;
	/** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
	uint32_t backend_disable[4];
	/** Value of MC_ARB_RAMCFG register */
	uint32_t mc_arb_ramcfg;
	/** Value of GB_ADDR_CONFIG */
	uint32_t gb_addr_cfg;
	/** Values of the GB_TILE_MODE0..31 registers */
	uint32_t gb_tile_mode[32];
	/** Values of GB_MACROTILE_MODE0..15 registers */
	uint32_t gb_macro_tile_mode[16];
	/** Value of PA_SC_RASTER_CONFIG register per SE */
	uint32_t pa_sc_raster_cfg[4];
	/** Value of PA_SC_RASTER_CONFIG_1 register per SE */
	uint32_t pa_sc_raster_cfg1[4];
	/** Number of active Compute Units */
	uint32_t cu_active_number;
	/** Bitmap of active Compute Units */
	uint32_t cu_bitmap[4][4];
};

/*--------------------------------------------------------------------------*/
/*------------------------- Functions --------------------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * Initialization / Cleanup
 *
 */

/**
 * Initialize the library for the given DRM file descriptor.
 *
 * \param   fd            - \c [in]  File descriptor for AMD GPU device
 *                                   received previously as the result of
 *                                   e.g. drmOpen() call.
 *                                   For legacy fd type, the DRI2/DRI3
 *                                   authentication should be done before
 *                                   calling this function.
 * \param   major_version - \c [out] Major version of library. It is assumed
 *                                   that adding new functionality will cause
 *                                   an increase in the major version.
 * \param   minor_version - \c [out] Minor version of library
 * \param   device_handle - \c [out] Pointer to opaque context which should
 *                                   be passed as the first parameter on each
 *                                   API call
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_device_deinitialize()
 */
int amdgpu_device_initialize(int fd,
			     uint32_t *major_version,
			     uint32_t *minor_version,
			     amdgpu_device_handle *device_handle);

/**
 * When access to the library is no longer needed, this function must be
 * called to give libdrm_amdgpu the opportunity to clean up any resources
 * if needed.
 *
 * \param   device_handle - \c [in] Context associated with file
 *                                  descriptor for AMD GPU device
 *                                  received previously as the
 *                                  result e.g. of drmOpen() call.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_device_initialize()
 */
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);

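/*
 * Example (informative, not part of the API): a minimal sketch of the
 * expected initialize/deinitialize pairing. How the fd is obtained
 * (drmOpen() here) is an assumption for illustration only.
 *
 *	int fd = drmOpen("amdgpu", NULL);
 *	uint32_t major, minor;
 *	amdgpu_device_handle dev;
 *
 *	if (fd >= 0 && amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
 *		// ... use dev with the rest of the API ...
 *		amdgpu_device_deinitialize(dev);
 *	}
 */
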
/**
 * Allocate memory to be used by UMD for GPU related operations
 *
 * \param   dev          - \c [in]  Device handle.
 *                                  See #amdgpu_device_initialize()
 * \param   alloc_buffer - \c [in]  Pointer to the structure describing an
 *                                  allocation request
 * \param   info         - \c [out] Pointer to structure which returns
 *                                  information about allocated memory
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_free()
 */
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    struct amdgpu_bo_alloc_result *info);

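/*
 * Example (informative): a minimal allocation sketch. dev is a handle
 * obtained from amdgpu_device_initialize(); the heap constant
 * AMDGPU_GEM_DOMAIN_GTT is assumed to come from the kernel's amdgpu_drm.h,
 * and the field names follow struct amdgpu_bo_alloc_request above.
 *
 *	struct amdgpu_bo_alloc_request req = {0};
 *	struct amdgpu_bo_alloc_result res = {0};
 *
 *	req.alloc_size = 64 * 1024;
 *	req.phys_alignment = 4096;
 *	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *
 *	if (amdgpu_bo_alloc(dev, &req, &res) == 0) {
 *		// res.buf_handle and res.virtual_mc_base_address are valid here
 *		amdgpu_bo_free(res.buf_handle);
 *	}
 */
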
/**
 * Associate opaque data with a buffer to be queried by another UMD
 *
 * \param   buf_handle - \c [in] Buffer handle
 * \param   info       - \c [in] Metadata to associate with the buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
			   struct amdgpu_bo_metadata *info);

/**
 * Query buffer information, including metadata previously associated with
 * a buffer.
 *
 * \param   buf_handle - \c [in]  Buffer handle
 * \param   info       - \c [out] Structure describing buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 */
int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
			 struct amdgpu_bo_info *info);

/**
 * Allow others to get access to a buffer
 *
 * \param   buf_handle    - \c [in]  Buffer handle
 * \param   type          - \c [in]  Type of handle requested
 * \param   shared_handle - \c [out] Special "shared" handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_import()
 */
int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle);

/**
 * Request access to a "shared" buffer
 *
 * \param   dev           - \c [in]  Device handle.
 *                                   See #amdgpu_device_initialize()
 * \param   type          - \c [in]  Type of handle requested
 * \param   shared_handle - \c [in]  Shared handle received as the result of
 *                                   an "export" operation
 * \param   output        - \c [out] Pointer to structure with information
 *                                   about imported buffer
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note  Buffer must be "imported" only using a new "fd" (different from
 *        the one used by the "exporter").
 *
 * \sa amdgpu_bo_export()
 */
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output);

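/*
 * Example (informative): sharing a buffer via a DMA-buf handle. Error
 * handling is omitted; buf_handle is assumed to come from amdgpu_bo_alloc()
 * and dev_b from amdgpu_device_initialize() on a different fd.
 *
 *	uint32_t shared;
 *	struct amdgpu_bo_import_result import = {0};
 *
 *	// Exporter side
 *	amdgpu_bo_export(buf_handle, amdgpu_bo_handle_type_dma_buf_fd, &shared);
 *
 *	// Importer side (different fd / device handle)
 *	amdgpu_bo_import(dev_b, amdgpu_bo_handle_type_dma_buf_fd, shared, &import);
 */
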
/**
 * Free previously allocated memory
 *
 * \param   buf_handle - \c [in] Buffer handle to free
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note In the case of memory shared between different applications, all
 *	 resources will be "physically" freed only when all such applications
 *	 have released the buffer.
 * \note It is the UMD's responsibility to "free" the buffer only when it is
 *	 no longer in use.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 */
int amdgpu_bo_free(amdgpu_bo_handle buf_handle);

/**
 * Request CPU access to GPU accessible memory
 *
 * \param   buf_handle - \c [in]  Buffer handle
 * \param   cpu        - \c [out] CPU address to be used for access
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_unmap()
 */
int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);

/**
 * Release CPU access to GPU memory
 *
 * \param   buf_handle - \c [in] Buffer handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_map()
 */
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);

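/*
 * Example (informative): filling a buffer from the CPU. buf_handle is
 * assumed to come from amdgpu_bo_alloc() with a CPU-visible heap.
 *
 *	void *cpu = NULL;
 *
 *	if (amdgpu_bo_cpu_map(buf_handle, &cpu) == 0) {
 *		memset(cpu, 0, 64 * 1024);	// write through the CPU mapping
 *		amdgpu_bo_cpu_unmap(buf_handle);
 *	}
 */
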
/**
 * Wait until a buffer is not used by the device.
 *
 * \param   buf_handle  - \c [in]  Buffer handle.
 * \param   timeout_ns  - \c [in]  Timeout in nanoseconds.
 * \param   buffer_busy - \c [out] 0 if buffer is idle, all GPU access was
 *                                 completed and no GPU access is scheduled.
 *                                 1 if GPU access is in flight or scheduled.
 *
 * \return   0 - on success
 *          <0 - AMD specific error code
 */
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
			    uint64_t timeout_ns,
			    bool *buffer_busy);

/**
 * Creates a BO list handle for command submission.
 *
 * \param   dev                 - \c [in]  Device handle.
 *                                         See #amdgpu_device_initialize()
 * \param   number_of_resources - \c [in]  Number of BOs in the list
 * \param   resources           - \c [in]  List of BO handles
 * \param   resource_prios      - \c [in]  Optional priority for each handle
 * \param   result              - \c [out] Created BO list handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_destroy()
 */
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result);

/**
 * Destroys a BO list handle.
 *
 * \param   handle - \c [in] BO list handle.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create()
 */
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);

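/*
 * Example (informative): building a resource list for a submission from two
 * hypothetical buffer handles, with no explicit priorities (NULL is assumed
 * to be acceptable since resource_prios is documented as optional).
 *
 *	amdgpu_bo_handle bos[2] = { vertex_bo, texture_bo };
 *	amdgpu_bo_list_handle bo_list;
 *
 *	if (amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list) == 0) {
 *		// ... reference bo_list in struct amdgpu_cs_request::resources ...
 *		amdgpu_bo_list_destroy(bo_list);
 *	}
 */
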
/*
 * Special GPU Resources
 *
 */

/**
 * Query information about GDS
 *
 * \param   dev      - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   gds_info - \c [out] Pointer to structure to get GDS information
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_gpu_resource_query_gds_info(amdgpu_device_handle dev,
				       struct amdgpu_gds_resource_info *gds_info);

/**
 * Allocate GDS partitions
 *
 * \param   dev        - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   gds_size   - \c [in]  Size of GDS allocation. Must be aligned
 *                                accordingly.
 * \param   alloc_info - \c [out] Pointer to structure to receive information
 *                                about the allocation
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_gpu_resource_gds_alloc(amdgpu_device_handle dev,
				  uint32_t gds_size,
				  struct amdgpu_gds_alloc_info *alloc_info);

/**
 * Release a GDS resource. When the GDS and associated resources are not
 * needed any more, the UMD should free them.
 *
 * \param   handle - \c [in] Handle assigned to GDS allocation
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_gpu_resource_gds_free(amdgpu_bo_handle handle);

/*
 * GPU Execution context
 *
 */

/**
 * Create a GPU execution Context
 *
 * For the purposes of the GPU Scheduler and GPU Robustness extensions it is
 * necessary to be able to identify rendering/compute contexts.
 * It also may be needed to associate some specific requirements with such
 * contexts. The kernel driver will guarantee that submissions from the same
 * context will always be executed in order (first come, first served).
 *
 * \param   dev     - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   context - \c [out] GPU Context handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_free()
 */
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context);

/**
 * Destroy a GPU execution context when it is not needed any more
 *
 * \param   context - \c [in] GPU Context handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 */
int amdgpu_cs_ctx_free(amdgpu_context_handle context);

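/*
 * Example (informative): creating a context for submissions and destroying
 * it on teardown.
 *
 *	amdgpu_context_handle ctx;
 *
 *	if (amdgpu_cs_ctx_create(dev, &ctx) == 0) {
 *		// ... allocate IBs and submit work on this context ...
 *		amdgpu_cs_ctx_free(ctx);
 *	}
 */
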
/**
 * Query reset state for the specific GPU Context
 *
 * \param   context - \c [in]  GPU Context handle
 * \param   state   - \c [out] One of AMDGPU_CTX_*_RESET
 * \param   hangs   - \c [out] Number of hangs caused by the context.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 */
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
				uint32_t *state, uint32_t *hangs);

/*
 * Command Buffers Management
 *
 */

/**
 * Allocate memory to be filled with PM4 packets and to serve as the first
 * entry point of execution (a.k.a. Indirect Buffer)
 *
 * \param   context - \c [in]  GPU Context which will use the IB
 * \param   ib_size - \c [in]  Size of allocation
 * \param   output  - \c [out] Pointer to structure to get information about
 *                             the allocated IB
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_free_ib()
 */
int amdgpu_cs_alloc_ib(amdgpu_context_handle context,
		       enum amdgpu_cs_ib_size ib_size,
		       struct amdgpu_cs_ib_alloc_result *output);

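/*
 * Example (informative): allocating a 4K IB, writing PM4 packets through the
 * CPU pointer and releasing it explicitly. The field names follow
 * struct amdgpu_cs_ib_alloc_result above; the packet contents are only
 * sketched.
 *
 *	struct amdgpu_cs_ib_alloc_result ib = {0};
 *
 *	if (amdgpu_cs_alloc_ib(ctx, amdgpu_cs_ib_size_4K, &ib) == 0) {
 *		uint32_t *pm4 = (uint32_t *)ib.cpu;
 *		pm4[0] = 0;	// ... emit PM4 packets here ...
 *
 *		// Either submit the IB (see amdgpu_cs_submit()) or free it:
 *		amdgpu_cs_free_ib(ib.handle);
 *	}
 */
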
/**
 * If the UMD has allocated IBs which it does not need any more, those IBs
 * must be explicitly freed.
 *
 * \param   handle - \c [in] IB handle
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note libdrm_amdgpu will guarantee that it will correctly detect when it
 *	 is safe to return the IB to the free pool
 *
 * \sa amdgpu_cs_alloc_ib()
 */
int amdgpu_cs_free_ib(amdgpu_ib_handle handle);

/**
 * Send request to submit command buffers to hardware.
 *
 * The kernel driver could use the GPU Scheduler to decide when to physically
 * send this request to the hardware. Accordingly, this request could be put
 * in a queue and sent for execution later. The only guarantee is that
 * requests from the same GPU context to the same ip:ip_instance:ring will be
 * executed in order.
 *
 * \param   context            - \c [in]  GPU Context
 * \param   flags              - \c [in]  Global submission flags
 * \param   ibs_request        - \c [in]  Pointer to submission requests.
 *                                        We could submit to several
 *                                        engines/rings simultaneously as
 *                                        separate requests.
 * \param   number_of_requests - \c [in]  Number of submission requests
 * \param   fences             - \c [out] Pointer to array of data to get
 *                                        fences to identify submission
 *                                        requests. Timestamps are valid
 *                                        in this GPU context and could be
 *                                        used to identify/detect completion
 *                                        of the submission request.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note It is assumed that by default the IB will be returned to the free
 *	 pool automatically by libdrm_amdgpu when the submission has
 *	 completed. It is possible for the UMD to decide to re-use the same
 *	 IB; in this case it should be explicitly freed.\n
 *	 Accordingly, by default, after submission the UMD should not touch
 *	 the passed IBs. If the UMD needs to re-use an IB, then the special
 *	 flag AMDGPU_CS_REUSE_IB must be set.
 *
 * \note It is required to pass a correct resource list with buffer handles
 *	 which will be accessible by command buffers from the submission.
 *	 This will allow the kernel driver to correctly implement "paging".
 *	 Failure to do so will have unpredictable results.
 *
 * \sa amdgpu_cs_alloc_ib(), amdgpu_cs_free_ib(),
 *     amdgpu_cs_query_fence_status()
 */
int amdgpu_cs_submit(amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests,
		     uint64_t *fences);

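/*
 * Example (informative): submitting a single IB on the GFX ring. The IP type
 * constant AMDGPU_HW_IP_GFX is assumed to come from the kernel's amdgpu_drm.h;
 * ib, ctx and bo_list are taken from the examples above, and num_dwords is a
 * hypothetical count of PM4 dwords written into the IB.
 *
 *	struct amdgpu_cs_ib_info ib_info = {0};
 *	struct amdgpu_cs_request request = {0};
 *	uint64_t fence;
 *
 *	ib_info.ib_handle = ib.handle;
 *	ib_info.size = num_dwords;		// size of the PM4 stream in dwords
 *
 *	request.ip_type = AMDGPU_HW_IP_GFX;
 *	request.ring = 0;
 *	request.resources = bo_list;
 *	request.number_of_ibs = 1;
 *	request.ibs = &ib_info;
 *
 *	amdgpu_cs_submit(ctx, 0, &request, 1, &fence);
 */
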
/**
 * Query status of Command Buffer Submission
 *
 * \param   fence   - \c [in]  Structure describing fence to query
 * \param   expired - \c [out] If fence expired or not.\n
 *                             0 - if fence is not expired\n
 *                             1 - otherwise
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note If the UMD wants only to check the operation status and return
 *	 immediately, then a timeout value of 0 must be passed. In this case
 *	 success will be returned if the submission was completed, or a
 *	 timeout error will be returned otherwise.
 *
 * \sa amdgpu_cs_submit()
 */
int amdgpu_cs_query_fence_status(struct amdgpu_cs_query_fence *fence,
				 uint32_t *expired);

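/*
 * Example (informative): waiting for the submission above to complete. The
 * field names follow struct amdgpu_cs_query_fence; ctx and fence come from
 * the submission example, and AMDGPU_HW_IP_GFX is assumed to come from the
 * kernel's amdgpu_drm.h.
 *
 *	struct amdgpu_cs_query_fence query = {0};
 *	uint32_t expired = 0;
 *
 *	query.context = ctx;
 *	query.ip_type = AMDGPU_HW_IP_GFX;
 *	query.ring = 0;
 *	query.fence = fence;
 *	query.timeout_ns = AMDGPU_TIMEOUT_INFINITE;
 *
 *	amdgpu_cs_query_fence_status(&query, &expired);
 */
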
/**
 * Query allocation size alignments
 *
 * The UMD should query information about GPU VM MC size alignment
 * requirements to be able to correctly choose the required allocation size
 * and implement internal optimizations if needed.
 *
 * \param   dev  - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get size alignment
 *                          requirements
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments
								*info);

/**
 * Query firmware versions
 *
 * \param   dev         - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   fw_type     - \c [in]  AMDGPU_INFO_FW_*
 * \param   ip_instance - \c [in]  Index of the IP block of the same type.
 * \param   index       - \c [in]  Index of the engine. (for SDMA and MEC)
 * \param   version     - \c [out] Pointer to the "version" return value
 * \param   feature     - \c [out] Pointer to the "feature" return value
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
				  unsigned ip_instance, unsigned index,
				  uint32_t *version, uint32_t *feature);

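/*
 * Example (informative): querying the GFX ME firmware version. The fw_type
 * constant AMDGPU_INFO_FW_GFX_ME is assumed to come from the kernel's
 * amdgpu_drm.h.
 *
 *	uint32_t version = 0, feature = 0;
 *
 *	amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
 *				      &version, &feature);
 */
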
/**
 * Query the number of HW IP instances of a certain type.
 *
 * \param   dev   - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   type  - \c [in]  Hardware IP block type = AMDGPU_HW_IP_*
 * \param   count - \c [out] Pointer to the returned number of instances
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
			     uint32_t *count);

/**
 * Query engine information
 *
 * This query allows the UMD to query information about the different engines
 * and their capabilities.
 *
 * \param   dev         - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   type        - \c [in]  Hardware IP block type = AMDGPU_HW_IP_*
 * \param   ip_instance - \c [in]  Index of the IP block of the same type.
 * \param   info        - \c [out] Pointer to structure to get information
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
			    unsigned ip_instance,
			    struct drm_amdgpu_info_hw_ip *info);

/**
 * Query heap information
 *
 * This query allows the UMD to query potentially available memory resources
 * and adjust its logic if necessary.
 *
 * \param   dev   - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   heap  - \c [in]  Heap type
 * \param   flags - \c [in]  Additional query flags
 * \param   info  - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_heap_info(amdgpu_device_handle dev,
			   uint32_t heap,
			   uint32_t flags,
			   struct amdgpu_heap_info *info);

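/*
 * Example (informative): querying VRAM heap usage. The heap constant
 * AMDGPU_GEM_DOMAIN_VRAM is assumed to come from the kernel's amdgpu_drm.h,
 * the flags argument is left at 0 and the field names follow
 * struct amdgpu_heap_info above.
 *
 *	struct amdgpu_heap_info vram = {0};
 *
 *	if (amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram) == 0) {
 *		// vram.heap_size and vram.heap_usage describe the VRAM heap
 *	}
 */
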
/**
 * Get the CRTC ID from the mode object ID
 *
 * \param   dev    - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   id     - \c [in]  Mode object ID
 * \param   result - \c [out] Pointer to the CRTC ID
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
			      int32_t *result);

/**
 * Query GPU H/w Info
 *
 * Query hardware specific information
 *
 * \param   dev  - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
			  struct amdgpu_gpu_info *info);

/**
 * Query hardware or driver information.
 *
 * The return size is query-specific and depends on the "info_id" parameter.
 * No more than "size" bytes is returned.
 *
 * \param   dev     - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   info_id - \c [in]  AMDGPU_INFO_*
 * \param   size    - \c [in]  Size of the returned value.
 * \param   value   - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX error code
 */
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
		      unsigned size, void *value);

/**
 * Read a set of consecutive memory-mapped registers.
 * Not all registers are allowed to be read by userspace.
 *
 * \param   dev          - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   dword_offset - \c [in]  Register offset in dwords
 * \param   count        - \c [in]  The number of registers to read starting
 *                                  from the offset
 * \param   instance     - \c [in]  GRBM_GFX_INDEX selector. It may have other
 *                                  uses. Set it to 0xffffffff if unsure.
 * \param   flags        - \c [in]  Flags with additional information.
 * \param   values       - \c [out] The pointer to return values.
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX error code
 */
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
			     unsigned count, uint32_t instance, uint32_t flags,
			     uint32_t *values);

/**
 * Request GPU access to user allocated memory, e.g. via "malloc"
 *
 * \param   dev  - \c [in]  Device handle. See #amdgpu_device_initialize()
 * \param   cpu  - \c [in]  CPU address of user allocated memory which we
 *                          want to map to GPU address space (make GPU
 *                          accessible). This address must be correctly
 *                          aligned.
 * \param   size - \c [in]  Size of allocation (must be correctly aligned)
 * \param   info - \c [out] Handle of allocation to be passed as a resource
 *                          on submission and be used in other operations
 *                          (e.g. for VA submission).
 *                          (Temporarily, struct amdgpu_bo_alloc_result is
 *                          used as the parameter to return the MC address.)
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note
 * This call doesn't guarantee that such memory will be persistently
 * "locked" / made non-pageable. The purpose of this call is to provide an
 * opportunity for the GPU to get access to this resource during submission.
 *
 * The maximum amount of memory which could be mapped in this call depends
 * on whether overcommit is disabled or not. If overcommit is disabled, then
 * the maximum amount of memory to be pinned will be limited by the remaining
 * "free" size of the total amount of memory which could be locked
 * simultaneously ("GART" size).
 *
 * The supported (theoretical) maximum size of the mapping is restricted only
 * by the amount of memory which could be locked simultaneously ("GART" size).
 *
 * It is the responsibility of the caller to correctly specify access rights
 * for the mapping.
 */
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				   void *cpu,
				   uint64_t size,
				   struct amdgpu_bo_alloc_result *info);

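/*
 * Example (informative): making a malloc'ed, page-aligned buffer GPU
 * accessible. The aligned_alloc() call and the 4096-byte page size are
 * assumptions for illustration only.
 *
 *	size_t size = 1 << 20;
 *	void *cpu = aligned_alloc(4096, size);
 *	struct amdgpu_bo_alloc_result userptr = {0};
 *
 *	if (cpu && amdgpu_create_bo_from_user_mem(dev, cpu, size, &userptr) == 0) {
 *		// userptr.buf_handle can now be referenced in a BO list;
 *		// userptr.virtual_mc_base_address is the GPU VM address
 *		amdgpu_bo_free(userptr.buf_handle);
 *	}
 *	free(cpu);
 */
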
#endif /* _AMDGPU_H_ */