/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

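/* Release a kernel GEM handle with DRM_IOCTL_GEM_CLOSE. The ioctl result is
 * deliberately ignored because callers have no way to recover from a failed
 * close. */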
static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                     uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

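/* Wrap an existing kernel handle in a fresh amdgpu_bo with a reference count
 * of one. This only sets up the CPU-side bookkeeping; no ioctl is issued. */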
static int amdgpu_bo_create(amdgpu_device_handle dev,
                            uint64_t size,
                            uint32_t handle,
                            amdgpu_bo_handle *buf_handle)
{
        struct amdgpu_bo *bo;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = handle;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        *buf_handle = bo;
        return 0;
}

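/* Allocate a buffer object through DRM_AMDGPU_GEM_CREATE and register the
 * new handle in the per-device handle table, so that a later import of the
 * same handle resolves to this amdgpu_bo. */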
drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
                               struct amdgpu_bo_alloc_request *alloc_buffer,
                               amdgpu_bo_handle *buf_handle)
{
        union drm_amdgpu_gem_create args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = alloc_buffer->preferred_heap;
        args.in.domain_flags = alloc_buffer->flags;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r)
                goto out;

        r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
                             buf_handle);
        if (r) {
                amdgpu_close_kms_handle(dev, args.out.handle);
                goto out;
        }

        pthread_mutex_lock(&dev->bo_table_mutex);
        r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
                                *buf_handle);
        pthread_mutex_unlock(&dev->bo_table_mutex);
        if (r)
                amdgpu_bo_free(*buf_handle);
out:
        return r;
}

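/* Attach UMD-private metadata and tiling information to a buffer. The opaque
 * metadata blob must fit into the fixed-size data array of
 * struct drm_amdgpu_gem_metadata. */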
drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                                      struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

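/* Query both the metadata and the original allocation parameters of a
 * buffer, e.g. one that was just imported, using the GEM_METADATA and
 * GEM_OP ioctls. */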
drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                                    struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Validate the BO passed in */
        if (!bo->handle)
                return -EINVAL;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

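/* Create a global flink name for the buffer. When the flink device differs
 * from the render device, the handle is first moved to the flink device
 * through a temporary dma-buf fd. */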
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return r;
}

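/* Export a buffer as a flink name, a KMS handle or a dma-buf fd, depending
 * on the requested handle type. */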
drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
                                enum amdgpu_bo_handle_type type,
                                uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
                                          DRM_CLOEXEC | DRM_RDWR,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

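/* Import a buffer from a flink name or dma-buf fd. Re-importing an already
 * known buffer only bumps its reference count, so one kernel handle always
 * maps to a single amdgpu_bo instance. */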
drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
                                enum amdgpu_bo_handle_type type,
                                uint32_t shared_handle,
                                struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        struct drm_gem_close close_arg = {};
        struct amdgpu_bo *bo = NULL;
        uint32_t handle = 0, flink_name = 0;
        uint64_t alloc_size = 0;
        int r = 0;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r)
                        goto unlock;

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        r = -errno;
                        goto free_bo_handle;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = handle_table_lookup(&dev->bo_handles, shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                /* Importing a KMS handle is not allowed. */
                r = -EPERM;
                goto unlock;

        default:
                r = -EINVAL;
                goto unlock;
        }

        if (bo) {
                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);
                pthread_mutex_unlock(&dev->bo_table_mutex);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                return 0;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r)
                        goto unlock;

                flink_name = shared_handle;
                handle = open_arg.handle;
                alloc_size = open_arg.size;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r)
                                goto free_bo_handle;
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
                        close(dma_fd);
                        if (r)
                                goto free_bo_handle;
                        close_arg.handle = open_arg.handle;
                        r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
                                     &close_arg);
                        if (r)
                                goto free_bo_handle;
                }
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                handle = shared_handle;
                alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
        case amdgpu_bo_handle_type_kms_noimport:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
        if (r)
                goto free_bo_handle;

        r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
        if (r)
                goto free_bo_handle;
        if (flink_name) {
                bo->flink_name = flink_name;
                r = handle_table_insert(&dev->bo_flink_names, flink_name,
                                        bo);
                if (r)
                        goto remove_handle;
        }

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;

remove_handle:
        handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
        if (flink_name && !close_arg.handle && open_arg.handle) {
                close_arg.handle = open_arg.handle;
                drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
        }
        if (bo)
                amdgpu_bo_free(bo);
        else
                amdgpu_close_kms_handle(dev, handle);
unlock:
        pthread_mutex_unlock(&dev->bo_table_mutex);
        return r;
}

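/* Drop one reference; on the last reference the buffer is removed from the
 * handle tables, any remaining CPU mapping is torn down and the kernel
 * handle is closed. */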
drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        struct amdgpu_device *dev;
        struct amdgpu_bo *bo = buf_handle;

        assert(bo != NULL);
        dev = bo->dev;
        pthread_mutex_lock(&dev->bo_table_mutex);

        if (update_references(&bo->refcount, NULL)) {
                /* Remove the buffer from the hash tables. */
                handle_table_remove(&dev->bo_handles, bo->handle);

                if (bo->flink_name)
                        handle_table_remove(&dev->bo_flink_names,
                                            bo->flink_name);

                /* Release CPU access. */
                if (bo->cpu_map_count > 0) {
                        bo->cpu_map_count = 1;
                        amdgpu_bo_cpu_unmap(bo);
                }

                amdgpu_close_kms_handle(dev, bo->handle);
                pthread_mutex_destroy(&bo->cpu_access_mutex);
                free(bo);
        }

        pthread_mutex_unlock(&dev->bo_table_mutex);
        return 0;
}

drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
        atomic_inc(&bo->refcount);
}

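/* Map the buffer for CPU access. Mappings are reference counted, so nested
 * map/unmap pairs return the same pointer; the mmap offset is queried from
 * the kernel via DRM_AMDGPU_GEM_MMAP. */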
drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

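/* Drop one CPU mapping reference; the actual munmap only happens when the
 * last mapping reference goes away. */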
drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EINVAL;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

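/* Block until the buffer is idle or the timeout expires; *busy reports
 * whether the buffer was still in use when the ioctl returned. */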
drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                                       uint64_t timeout_ns,
                                       bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

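/* Find the buffer that contains the given CPU address by scanning all
 * CPU-mapped buffers of the device; see the comment in the function body
 * on why this linear scan is considered acceptable. */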
drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
                                             void *cpu,
                                             uint64_t size,
                                             amdgpu_bo_handle *buf_handle,
                                             uint64_t *offset_in_bo)
{
        struct amdgpu_bo *bo;
        uint32_t i;
        int r = 0;

        if (cpu == NULL || size == 0)
                return -EINVAL;

        /*
         * Workaround for a buggy application which tries to import previously
         * exposed CPU pointers. If we find a real world use case we should
         * improve that by asking the kernel for the right handle.
         */
        pthread_mutex_lock(&dev->bo_table_mutex);
        for (i = 0; i < dev->bo_handles.max_key; i++) {
                bo = handle_table_lookup(&dev->bo_handles, i);
                if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
                        continue;
                if (cpu >= bo->cpu_ptr &&
                    cpu < (void*)((uintptr_t)bo->cpu_ptr + bo->alloc_size))
                        break;
        }

        if (i < dev->bo_handles.max_key) {
                atomic_inc(&bo->refcount);
                *buf_handle = bo;
                *offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
        } else {
                *buf_handle = NULL;
                *offset_in_bo = 0;
                r = -ENXIO;
        }
        pthread_mutex_unlock(&dev->bo_table_mutex);

        return r;
}

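/* Create a buffer object backed by anonymous user memory via
 * DRM_AMDGPU_GEM_USERPTR. The range should be page aligned; the kernel is
 * expected to reject unaligned ranges. */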
drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                              void *cpu,
                                              uint64_t size,
                                              amdgpu_bo_handle *buf_handle)
{
        int r;
        struct drm_amdgpu_gem_userptr args;

        args.addr = (uintptr_t)cpu;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
                AMDGPU_GEM_USERPTR_VALIDATE;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                goto out;

        r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
        if (r) {
                amdgpu_close_kms_handle(dev, args.handle);
                goto out;
        }

        pthread_mutex_lock(&dev->bo_table_mutex);
        r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
                                *buf_handle);
        pthread_mutex_unlock(&dev->bo_table_mutex);
        if (r)
                amdgpu_bo_free(*buf_handle);
out:
        return r;
}

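/* Create a BO list from kernel-style entries and return the raw kernel list
 * handle, without any library-side wrapper object. */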
drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
                                         uint32_t number_of_buffers,
                                         struct drm_amdgpu_bo_list_entry *buffers,
                                         uint32_t *result)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_buffers;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        if (!r)
                *result = args.out.list_handle;
        return r;
}

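/* Destroy a raw BO list created with amdgpu_bo_list_create_raw(). */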
drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
                                          uint32_t bo_list)
{
        union drm_amdgpu_bo_list args;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = bo_list;

        return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                   &args, sizeof(args));
}

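/* Create a BO list from library handles with optional per-buffer priorities,
 * wrapping the kernel list handle in an amdgpu_bo_list object. */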
drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
                                     uint32_t number_of_resources,
                                     amdgpu_bo_handle *resources,
                                     uint8_t *resource_prios,
                                     amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        *result = malloc(sizeof(struct amdgpu_bo_list));
        if (!*result) {
                free(list);
                return -ENOMEM;
        }

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        if (r) {
                free(*result);
                return r;
        }

        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

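/* Destroy a BO list and free the wrapper allocated by
 * amdgpu_bo_list_create(). */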
drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}

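/* Replace the contents of an existing BO list with a new set of buffers
 * and priorities. */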
drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                                     uint32_t number_of_resources,
                                     amdgpu_bo_handle *resources,
                                     uint8_t *resource_prios)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        if (!number_of_resources)
                return -EINVAL;

        /* overflow check for multiplication */
        if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
                return -EINVAL;

        list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
        if (!list)
                return -ENOMEM;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
        args.in.list_handle = handle->handle;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        free(list);
        return r;
}

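/* Convenience wrapper around amdgpu_bo_va_op_raw() that aligns the size up
 * to the page size and always maps readable/writeable/executable; note that
 * the flags argument is currently ignored. */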
drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
                               uint64_t offset,
                               uint64_t size,
                               uint64_t addr,
                               uint64_t flags,
                               uint32_t ops)
{
        amdgpu_device_handle dev = bo->dev;

        size = ALIGN(size, getpagesize());

        return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
                                   AMDGPU_VM_PAGE_READABLE |
                                   AMDGPU_VM_PAGE_WRITEABLE |
                                   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

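/* Map, unmap, replace or clear a GPU virtual address range for a buffer.
 * bo may be NULL, e.g. for AMDGPU_VA_OP_CLEAR, where only the address range
 * matters. */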
drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
                                   amdgpu_bo_handle bo,
                                   uint64_t offset,
                                   uint64_t size,
                                   uint64_t addr,
                                   uint64_t flags,
                                   uint32_t ops)
{
        struct drm_amdgpu_gem_va va;

        if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
            ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
                return -EINVAL;

        memset(&va, 0, sizeof(va));
        va.handle = bo ? bo->handle : 0;
        va.operation = ops;
        va.flags = flags;
        va.va_address = addr;
        va.offset_in_bo = offset;
        va.map_size = size;

        return drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
}