2 * Copyright © 2007, 2011, 2013, 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
37 #include <sys/ioctl.h>
41 #include <pciaccess.h>
46 #include <sys/types.h>
47 #include <sys/syscall.h>
48 #include <sys/utsname.h>
54 #include "intel_chipset.h"
56 #include "igt_debugfs.h"
59 #include "ioctl_wrappers.h"
62 * SECTION:ioctl_wrappers
63 * @short_description: ioctl wrappers and related functions
64 * @title: ioctl wrappers
65 * @include: ioctl_wrappers.h
67 * This helper library contains simple functions to wrap the raw drm/i915 kernel
68 * ioctls. The normal versions never pass any error codes to the caller and use
69 * igt_assert() to check for error conditions instead. For some ioctls raw
70 * wrappers which do pass on error codes are available. These raw wrappers have
73 * For wrappers which check for feature bits there can also be two versions: The
74 * normal one simply returns a boolean to the caller. But when skipping the
75 * testcase entirely is the right action then it's better to use igt_skip()
76 * directly in the wrapper. Such functions have _require_ in their name to
81 * gem_handle_to_libdrm_bo:
82 * @bufmgr: libdrm buffer manager instance
83 * @fd: open i915 drm file descriptor
84 * @name: buffer name in libdrm
85 * @handle: gem buffer object handle
87 * This helper function imports a raw gem buffer handle into the libdrm buffer
90 * Returns: The imported libdrm buffer manager object.
93 gem_handle_to_libdrm_bo(drm_intel_bufmgr *bufmgr, int fd, const char *name, uint32_t handle)
95 struct drm_gem_flink flink;
99 memset(&flink, 0, sizeof(handle));
100 flink.handle = handle;
101 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
102 igt_assert(ret == 0);
105 bo = drm_intel_bo_gem_create_from_name(bufmgr, name, flink.name);
113 * @fd: open i915 drm file descriptor
114 * @handle: gem buffer object handle
115 * @tiling: (out) tiling mode of the gem buffer
116 * @swizzle: (out) bit 6 swizzle mode
118 * This wraps the GET_TILING ioctl.
121 gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle)
123 struct drm_i915_gem_get_tiling get_tiling;
126 memset(&get_tiling, 0, sizeof(get_tiling));
127 get_tiling.handle = handle;
129 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
130 igt_assert(ret == 0);
132 *tiling = get_tiling.tiling_mode;
133 *swizzle = get_tiling.swizzle_mode;
136 int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
138 struct drm_i915_gem_set_tiling st;
141 memset(&st, 0, sizeof(st));
144 st.tiling_mode = tiling;
145 st.stride = tiling ? stride : 0;
147 ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
148 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
153 igt_assert(st.tiling_mode == tiling);
159 * @fd: open i915 drm file descriptor
160 * @handle: gem buffer object handle
161 * @tiling: tiling mode bits
162 * @stride: stride of the buffer when using a tiled mode, otherwise must be 0
164 * This wraps the SET_TILING ioctl.
/* Asserting variant of __gem_set_tiling(): any failure aborts the test. */
void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
{
	igt_assert(__gem_set_tiling(fd, handle, tiling, stride) == 0);
}
/* Local copy of the i915 caching uapi, so the tests build against headers
 * that predate SET_CACHING/GET_CACHING. Note the historical "CACHEING"
 * spelling is kept to match the rest of the test-suite. */
struct local_drm_i915_gem_caching {
	uint32_t handle;	/* gem buffer object handle */
	uint32_t caching;	/* caching mode, e.g. none/llc/l3 */
};

#define LOCAL_DRM_I915_GEM_SET_CACHEING 0x2f
#define LOCAL_DRM_I915_GEM_GET_CACHEING 0x30
#define LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING \
	DRM_IOW(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_SET_CACHEING, struct local_drm_i915_gem_caching)
#define LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_DRM_I915_GEM_GET_CACHEING, struct local_drm_i915_gem_caching)
185 * @fd: open i915 drm file descriptor
186 * @handle: gem buffer object handle
187 * @caching: caching mode bits
189 * This wraps the SET_CACHING ioctl. Note that this function internally calls
190 * igt_require() when SET_CACHING isn't available, hence automatically skips the
191 * test. Therefore always extract test logic which uses this into its own
194 void gem_set_caching(int fd, uint32_t handle, uint32_t caching)
196 struct local_drm_i915_gem_caching arg;
199 memset(&arg, 0, sizeof(arg));
201 arg.caching = caching;
202 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
204 igt_assert(ret == 0 || (errno == ENOTTY || errno == EINVAL));
205 igt_require(ret == 0);
211 * @fd: open i915 drm file descriptor
212 * @handle: gem buffer object handle
214 * This wraps the GET_CACHING ioctl.
216 * Returns: The current caching mode bits.
218 uint32_t gem_get_caching(int fd, uint32_t handle)
220 struct local_drm_i915_gem_caching arg;
225 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_GET_CACHEING, &arg);
226 igt_assert(ret == 0);
234 * @fd: open i915 drm file descriptor
235 * @name: flink buffer name
237 * This wraps the GEM_OPEN ioctl, which is used to import an flink name.
239 * Returns: gem file-private buffer handle of the open object.
241 uint32_t gem_open(int fd, uint32_t name)
243 struct drm_gem_open open_struct;
246 memset(&open_struct, 0, sizeof(open_struct));
247 open_struct.name = name;
248 ret = ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_struct);
249 igt_assert(ret == 0);
250 igt_assert(open_struct.handle != 0);
253 return open_struct.handle;
258 * @fd: open i915 drm file descriptor
259 * @handle: file-private gem buffer object handle
261 * This wraps the GEM_FLINK ioctl, which is used to export a gem buffer object
262 * into the device-global flink namespace. See gem_open() for opening such a
263 * buffer name on a different i915 drm file descriptor.
265 * Returns: The created flink buffer name.
267 uint32_t gem_flink(int fd, uint32_t handle)
269 struct drm_gem_flink flink;
272 memset(&flink, 0, sizeof(flink));
273 flink.handle = handle;
274 ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
275 igt_assert(ret == 0);
283 * @fd: open i915 drm file descriptor
284 * @handle: gem buffer object handle
286 * This wraps the GEM_CLOSE ioctl, which to release a file-private gem buffer
289 void gem_close(int fd, uint32_t handle)
291 struct drm_gem_close close_bo;
293 memset(&close_bo, 0, sizeof(close_bo));
294 close_bo.handle = handle;
295 do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
300 * @fd: open i915 drm file descriptor
301 * @handle: gem buffer object handle
302 * @offset: offset within the buffer of the subrange
303 * @buf: pointer to the data to write into the buffer
304 * @length: size of the subrange
306 * This wraps the PWRITE ioctl, which is to upload a linear data to a subrange
307 * of a gem buffer object.
309 void gem_write(int fd, uint32_t handle, uint32_t offset, const void *buf, uint32_t length)
311 struct drm_i915_gem_pwrite gem_pwrite;
313 memset(&gem_pwrite, 0, sizeof(gem_pwrite));
314 gem_pwrite.handle = handle;
315 gem_pwrite.offset = offset;
316 gem_pwrite.size = length;
317 gem_pwrite.data_ptr = (uintptr_t)buf;
318 do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
323 * @fd: open i915 drm file descriptor
324 * @handle: gem buffer object handle
325 * @offset: offset within the buffer of the subrange
326 * @buf: pointer to the data to read into
327 * @length: size of the subrange
329 * This wraps the PREAD ioctl, which is to download a linear data to a subrange
330 * of a gem buffer object.
332 void gem_read(int fd, uint32_t handle, uint32_t offset, void *buf, uint32_t length)
334 struct drm_i915_gem_pread gem_pread;
336 memset(&gem_pread, 0, sizeof(gem_pread));
337 gem_pread.handle = handle;
338 gem_pread.offset = offset;
339 gem_pread.size = length;
340 gem_pread.data_ptr = (uintptr_t)buf;
341 do_ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread);
346 * @fd: open i915 drm file descriptor
347 * @handle: gem buffer object handle
348 * @read_domains: gem domain bits for read access
349 * @write_domain: gem domain bit for write access
351 * This wraps the SET_DOMAIN ioctl, which is used to control the coherency of
352 * the gem buffer object between the cpu and gtt mappings. It is also use to
353 * synchronize with outstanding rendering in general, but for that use-case
354 * please have a look at gem_sync().
356 void gem_set_domain(int fd, uint32_t handle,
357 uint32_t read_domains, uint32_t write_domain)
359 struct drm_i915_gem_set_domain set_domain;
361 memset(&set_domain, 0, sizeof(set_domain));
362 set_domain.handle = handle;
363 set_domain.read_domains = read_domains;
364 set_domain.write_domain = write_domain;
366 do_ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
371 * @fd: open i915 drm file descriptor
372 * @handle: gem buffer object handle
374 * This is a wrapper around gem_set_domain() which simply blocks for any
375 * outstanding rendering to complete.
377 void gem_sync(int fd, uint32_t handle)
379 gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
382 uint32_t __gem_create(int fd, int size)
384 struct drm_i915_gem_create create;
387 memset(&create, 0, sizeof(create));
390 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
396 return create.handle;
401 * @fd: open i915 drm file descriptor
402 * @size: desired size of the buffer
404 * This wraps the GEM_CREATE ioctl, which allocates a new gem buffer object of
407 * Returns: The file-private handle of the created buffer object
409 uint32_t gem_create(int fd, int size)
411 struct drm_i915_gem_create create;
413 memset(&create, 0, sizeof(create));
416 do_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
417 igt_assert(create.handle);
419 return create.handle;
424 * @fd: open i915 drm file descriptor
425 * @execbuf: execbuffer data structure
427 * This wraps the EXECBUFFER2 ioctl, which submits a batchbuffer for the gpu to
430 void gem_execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
435 DRM_IOCTL_I915_GEM_EXECBUFFER2,
437 igt_assert(ret == 0);
443 * @fd: open i915 drm file descriptor
444 * @handle: gem buffer object handle
445 * @size: size of the gem buffer
446 * @prot: memory protection bits as used by mmap()
448 * This functions wraps up procedure to establish a memory mapping through the
451 * Returns: A pointer to the created memory mapping.
453 void *gem_mmap__gtt(int fd, uint32_t handle, int size, int prot)
455 struct drm_i915_gem_mmap_gtt mmap_arg;
458 memset(&mmap_arg, 0, sizeof(mmap_arg));
459 mmap_arg.handle = handle;
460 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
463 ptr = mmap64(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
464 if (ptr == MAP_FAILED)
472 * @fd: open i915 drm file descriptor
473 * @handle: gem buffer object handle
474 * @size: size of the gem buffer
475 * @prot: memory protection bits as used by mmap()
477 * This functions wraps up procedure to establish a memory mapping through
478 * direct cpu access, bypassing the gpu completely.
480 * Returns: A pointer to the created memory mapping.
482 void *gem_mmap__cpu(int fd, uint32_t handle, int size, int prot)
484 struct drm_i915_gem_mmap mmap_arg;
486 memset(&mmap_arg, 0, sizeof(mmap_arg));
487 mmap_arg.handle = handle;
489 mmap_arg.size = size;
490 if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
494 return (void *)(uintptr_t)mmap_arg.addr_ptr;
499 * @fd: open i915 drm file descriptor
500 * @handle: gem buffer object handle
501 * @state: desired madvise state
503 * This is a wraps the MADVISE ioctl, which is used in libdrm to implement
504 * opportunistic buffer object caching. Objects in the cache are set to DONTNEED
505 * (internally in the kernel tracked as purgeable objects). When such a cached
506 * object is in need again it must be set back to WILLNEED before first use.
508 * Returns: When setting the madvise state to WILLNEED this returns whether the
509 * backing storage was still available or not.
511 int gem_madvise(int fd, uint32_t handle, int state)
513 struct drm_i915_gem_madvise madv;
515 memset(&madv, 0, sizeof(madv));
516 madv.handle = handle;
519 do_ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
521 return madv.retained;
525 * gem_context_create:
526 * @fd: open i915 drm file descriptor
528 * This is a wraps the CONTEXT_CREATE ioctl, which is used to allocate a new
529 * hardware context. Not that similarly to gem_set_caching() this wrapper calls
530 * igt_require() internally to correctly skip on kernels and platforms where hw
531 * context support is not available.
533 * Returns: The id of the allocated hw context.
535 uint32_t gem_context_create(int fd)
537 struct drm_i915_gem_context_create create;
540 memset(&create, 0, sizeof(create));
541 ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
542 igt_require(ret == 0 || (errno != ENODEV && errno != EINVAL));
543 igt_assert(ret == 0);
546 return create.ctx_id;
551 * @fd: open i915 drm file descriptor
552 * @handle: gem buffer object handle
554 * This is a wraps the SW_FINISH ioctl, which is used to flush out frontbuffer
555 * rendering done through the direct cpu memory mappings. Shipping userspace
556 * does _not_ call this after frontbuffer rendering through gtt memory mappings.
558 void gem_sw_finish(int fd, uint32_t handle)
560 struct drm_i915_gem_sw_finish finish;
562 memset(&finish, 0, sizeof(finish));
563 finish.handle = handle;
565 do_ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
570 * @fd: open i915 drm file descriptor
571 * @handle: gem buffer object handle
573 * This is a wraps the BUSY ioctl, which tells whether a buffer object is still
574 * actively used by the gpu in a execbuffer.
576 * Returns: The busy state of the buffer object.
578 bool gem_bo_busy(int fd, uint32_t handle)
580 struct drm_i915_gem_busy busy;
582 memset(&busy, 0, sizeof(busy));
583 busy.handle = handle;
585 do_ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
591 /* feature test helpers */
594 * gem_uses_aliasing_ppgtt:
595 * @fd: open i915 drm file descriptor
597 * Feature test macro to check whether the kernel internally uses ppgtt to
598 * execute batches. The /aliasing/ in the function name is a bit a misnomer,
599 * this driver parameter is also true when full ppgtt address spaces are
600 * available since for batchbuffer construction only ppgtt or global gtt is
603 * Returns: Whether batches are run through ppgtt.
605 bool gem_uses_aliasing_ppgtt(int fd)
607 struct drm_i915_getparam gp;
610 memset(&gp, 0, sizeof(gp));
611 gp.param = 18; /* HAS_ALIASING_PPGTT */
614 if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
622 * gem_available_fences:
623 * @fd: open i915 drm file descriptor
625 * Feature test macro to query the kernel for the number of available fences
626 * useable in a batchbuffer. Only relevant for pre-gen4.
628 * Returns: The number of available fences.
630 int gem_available_fences(int fd)
632 struct drm_i915_getparam gp;
635 memset(&gp, 0, sizeof(gp));
636 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
639 if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp)))
648 * @fd: open i915 drm file descriptor
650 * Feature test macro to query the number of available rings. This is useful in
651 * test loops which need to step through all rings and similar logic.
653 * For more explicit tests of ring availability see gem_has_enable_ring() and
654 * the ring specific versions like gem_has_bsd().
656 * Returns: The number of available rings.
int gem_get_num_rings(int fd)
{
	int num_rings = 1;	/* render ring is always available */

	/* NOTE(review): the probe order below (bsd, blt, vebox) was
	 * reconstructed from the visible vebox check — confirm against the
	 * upstream file. Rings are counted cumulatively. */
	if (gem_has_bsd(fd))
		num_rings++;
	else
		goto skip;

	if (gem_has_blt(fd))
		num_rings++;
	else
		goto skip;

	if (gem_has_vebox(fd))
		num_rings++;
	else
		goto skip;

skip:
	return num_rings;
}
683 * gem_has_enable_ring:
684 * @fd: open i915 drm file descriptor
685 * @param: ring flag bit as used in gem_execbuf()
687 * Feature test macro to query whether a specific ring is available.
689 * Returns: Whether the ring is available or not.
691 bool gem_has_enable_ring(int fd,int param)
693 drm_i915_getparam_t gp;
696 memset(&gp, 0, sizeof(gp));
700 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
709 * @fd: open i915 drm file descriptor
711 * Feature test macro to query whether the BSD ring is available. This is simply
712 * a specific version of gem_has_enable_ring() for the BSD ring.
714 * Note that recent Bspec calls this the VCS ring for Video Command Submission.
716 * Returns: Whether the BSD ring is avaible or not.
718 bool gem_has_bsd(int fd)
720 return gem_has_enable_ring(fd,I915_PARAM_HAS_BSD);
725 * @fd: open i915 drm file descriptor
727 * Feature test macro to query whether the blitter ring is available. This is simply
728 * a specific version of gem_has_enable_ring() for the blitter ring.
730 * Note that recent Bspec calls this the BCS ring for Blitter Command Submission.
732 * Returns: Whether the blitter ring is avaible or not.
734 bool gem_has_blt(int fd)
736 return gem_has_enable_ring(fd,I915_PARAM_HAS_BLT);
739 #define LOCAL_I915_PARAM_HAS_VEBOX 22
742 * @fd: open i915 drm file descriptor
744 * Feature test macro to query whether the vebox ring is available. This is simply
745 * a specific version of gem_has_enable_ring() for the vebox ring.
747 * Note that recent Bspec calls this the VECS ring for Video Enhancement Command
750 * Returns: Whether the vebox ring is avaible or not.
752 bool gem_has_vebox(int fd)
754 return gem_has_enable_ring(fd,LOCAL_I915_PARAM_HAS_VEBOX);
758 * gem_available_aperture_size:
759 * @fd: open i915 drm file descriptor
761 * Feature test macro to query the kernel for the available gpu aperture size
762 * useable in a batchbuffer.
764 * Returns: The available gtt address space size.
766 uint64_t gem_available_aperture_size(int fd)
768 struct drm_i915_gem_get_aperture aperture;
770 memset(&aperture, 0, sizeof(aperture));
771 aperture.aper_size = 256*1024*1024;
772 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
774 return aperture.aper_available_size;
779 * @fd: open i915 drm file descriptor
781 * Feature test macro to query the kernel for the total gpu aperture size.
783 * Returns: The total gtt address space size.
785 uint64_t gem_aperture_size(int fd)
787 struct drm_i915_gem_get_aperture aperture;
789 memset(&aperture, 0, sizeof(aperture));
790 aperture.aper_size = 256*1024*1024;
791 do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
793 return aperture.aper_size;
798 * @fd: open i915 drm file descriptor
800 * Feature test macro to query the kernel for the mappable gpu aperture size.
801 * This is the area avaialble for GTT memory mappings.
803 * Returns: The mappable gtt address space size.
805 uint64_t gem_mappable_aperture_size(void)
807 struct pci_device *pci_dev = intel_get_pci_device();
810 if (intel_gen(pci_dev->device_id) < 3)
815 return pci_dev->regions[bar].size;
819 * gem_require_caching:
820 * @fd: open i915 drm file descriptor
822 * Feature test macro to query whether buffer object caching control is
823 * available. Automatically skips through igt_require() if not.
825 void gem_require_caching(int fd)
827 struct local_drm_i915_gem_caching arg;
830 memset(&arg, 0, sizeof(arg));
831 arg.handle = gem_create(fd, 4096);
832 igt_assert(arg.handle != 0);
835 ret = ioctl(fd, LOCAL_DRM_IOCTL_I915_GEM_SET_CACHEING, &arg);
836 gem_close(fd, arg.handle);
838 igt_require(ret == 0);
844 * @fd: open i915 drm file descriptor
845 * @ring_id: ring flag bit as used in gem_execbuf()
847 * Feature test macro to query whether a specific ring is available.
848 * In contrast to gem_has_enable_ring() this automagically skips if the ring
849 * isn't available by calling igt_require().
851 void gem_require_ring(int fd, int ring_id)
854 case I915_EXEC_RENDER:
857 igt_require(HAS_BLT_RING(intel_get_drm_devid(fd)));
860 igt_require(HAS_BSD_RING(intel_get_drm_devid(fd)));
862 #ifdef I915_EXEC_VEBOX
863 case I915_EXEC_VEBOX:
864 igt_require(gem_has_vebox(fd));
876 * prime_handle_to_fd:
877 * @fd: open i915 drm file descriptor
878 * @handle: file-private gem buffer object handle
880 * This wraps the PRIME_HANDLE_TO_FD ioctl, which is used to export a gem buffer
881 * object into a global (i.e. potentially cross-device) dma-buf file-descriptor
884 * Returns: The created dma-buf fd handle.
886 int prime_handle_to_fd(int fd, uint32_t handle)
888 struct drm_prime_handle args;
890 memset(&args, 0, sizeof(args));
891 args.handle = handle;
892 args.flags = DRM_CLOEXEC;
895 do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
901 * prime_fd_to_handle:
902 * @fd: open i915 drm file descriptor
903 * @dma_buf_fd: dma-buf fd handle
905 * This wraps the PRIME_FD_TO_HANDLE ioctl, which is used to import a dma-buf
906 * file-descriptor into a gem buffer object.
908 * Returns: The created gem buffer object handle.
910 uint32_t prime_fd_to_handle(int fd, int dma_buf_fd)
912 struct drm_prime_handle args;
914 memset(&args, 0, sizeof(args));
915 args.fd = dma_buf_fd;
919 do_ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
926 * @dma_buf_fd: dma-buf fd handle
928 * This wraps the lseek() protocol used to query the invariant size of a
929 * dma-buf. Not all kernels support this, which is check with igt_require() and
930 * so will result in automagic test skipping.
932 * Returns: The lifetime-invariant size of the dma-buf object.
/* Query the invariant size of a dma-buf by seeking to its end.
 * ESPIPE means the kernel lacks dma-buf lseek support: that is tolerated
 * by the assert but converted into a test skip by igt_require.
 * NOTE(review): the function body continues past the visible end of this
 * chunk (presumably `return ret;`) — left untouched. */
934 off_t prime_get_size(int dma_buf_fd)
938 	ret = lseek(dma_buf_fd, 0, SEEK_END);
939 	igt_assert(ret >= 0 || errno == ESPIPE);
940 	igt_require(ret >= 0);