/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_hash_table.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
                                    uint32_t handle)
{
        struct drm_gem_close args = {};

        args.handle = handle;
        drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

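/* Final teardown once the last reference to "bo" is dropped: unlink the
 * buffer from the per-device hash tables, force-release any outstanding CPU
 * mapping, close the KMS handle and return the virtual address range to the
 * VA manager. */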
void amdgpu_bo_free_internal(amdgpu_bo_handle bo)
{
        /* Remove the buffer from the hash tables. */
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_remove(bo->dev->bo_handles,
                               (void*)(uintptr_t)bo->handle);
        if (bo->flink_name) {
                util_hash_table_remove(bo->dev->bo_flink_names,
                                       (void*)(uintptr_t)bo->flink_name);
        }
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        /* Release CPU access. Collapse any nested mappings into one so that
         * a single unmap call tears the mapping down. */
        if (bo->cpu_map_count > 0) {
                bo->cpu_map_count = 1;
                amdgpu_bo_cpu_unmap(bo);
        }

        amdgpu_close_kms_handle(bo->dev, bo->handle);
        pthread_mutex_destroy(&bo->cpu_access_mutex);
        /* Don't hand an invalid address back to the VA manager. */
        if (bo->virtual_mc_base_address != AMDGPU_INVALID_VA_ADDRESS)
                amdgpu_vamgr_free_va(&bo->dev->vamgr,
                                     bo->virtual_mc_base_address,
                                     bo->alloc_size);
        free(bo);
}

int amdgpu_bo_alloc(amdgpu_device_handle dev,
                    struct amdgpu_bo_alloc_request *alloc_buffer,
                    struct amdgpu_bo_alloc_result *info)
{
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create args;
        unsigned heap = alloc_buffer->preferred_heap;
        int r = 0;

        /* It's an error if the heap is not specified. */
        if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
                return -EINVAL;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = alloc_buffer->alloc_size;

        memset(&args, 0, sizeof(args));
        args.in.bo_size = alloc_buffer->alloc_size;
        args.in.alignment = alloc_buffer->phys_alignment;

        /* Set the placement. */
        args.in.domains = heap & AMDGPU_GEM_DOMAIN_MASK;
        args.in.domain_flags = alloc_buffer->flags & AMDGPU_GEM_CREATE_CPU_GTT_MASK;

        /* Allocate the buffer with the preferred heap. */
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
                                &args, sizeof(args));
        if (r) {
                free(bo);
                return r;
        }

        bo->handle = args.out.handle;

        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        /* Map the buffer into the GPU virtual address space. */
        {
                union drm_amdgpu_gem_va va;

                memset(&va, 0, sizeof(va));

                bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
                                                 alloc_buffer->alloc_size,
                                                 alloc_buffer->phys_alignment);

                if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
                        amdgpu_bo_free_internal(bo);
                        return -ENOSPC;
                }

                va.in.handle = bo->handle;
                va.in.operation = AMDGPU_VA_OP_MAP;
                va.in.flags =   AMDGPU_VM_PAGE_READABLE |
                                AMDGPU_VM_PAGE_WRITEABLE |
                                AMDGPU_VM_PAGE_EXECUTABLE;
                va.in.va_address = bo->virtual_mc_base_address;

                r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
                if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                        amdgpu_bo_free_internal(bo);
                        /* Don't report success if the kernel rejected the mapping. */
                        return r ? r : -EINVAL;
                }
                pthread_mutex_lock(&dev->bo_table_mutex);
                util_hash_table_set(dev->bo_vas,
                                    (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
                pthread_mutex_unlock(&dev->bo_table_mutex);
        }

        info->buf_handle = bo;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

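/* Usage sketch for amdgpu_bo_alloc() (illustrative only, not part of the
 * library): allocate a 4 KiB GTT buffer on an initialized device handle
 * "dev".
 *
 *      struct amdgpu_bo_alloc_request req = {};
 *      struct amdgpu_bo_alloc_result res;
 *
 *      req.alloc_size = 4096;
 *      req.phys_alignment = 4096;
 *      req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
 *      if (!amdgpu_bo_alloc(dev, &req, &res))
 *              ... use res.buf_handle and res.virtual_mc_base_address ...
 */
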
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
        struct drm_amdgpu_gem_metadata args = {};

        args.handle = bo->handle;
        args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
        args.data.flags = info->flags;
        args.data.tiling_info = info->tiling_info;

        if (info->size_metadata > sizeof(args.data.data))
                return -EINVAL;

        if (info->size_metadata) {
                args.data.data_size_bytes = info->size_metadata;
                memcpy(args.data.data, info->umd_metadata, info->size_metadata);
        }

        return drmCommandWriteRead(bo->dev->fd,
                                   DRM_AMDGPU_GEM_METADATA,
                                   &args, sizeof(args));
}

int amdgpu_bo_query_info(amdgpu_bo_handle bo,
                         struct amdgpu_bo_info *info)
{
        struct drm_amdgpu_gem_metadata metadata = {};
        struct drm_amdgpu_gem_create_in bo_info = {};
        struct drm_amdgpu_gem_op gem_op = {};
        int r;

        /* Query metadata. */
        metadata.handle = bo->handle;
        metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                                &metadata, sizeof(metadata));
        if (r)
                return r;

        if (metadata.data.data_size_bytes >
            sizeof(info->metadata.umd_metadata))
                return -EINVAL;

        /* Query buffer info. */
        gem_op.handle = bo->handle;
        gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
        gem_op.value = (uintptr_t)&bo_info;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
                                &gem_op, sizeof(gem_op));
        if (r)
                return r;

        memset(info, 0, sizeof(*info));
        info->alloc_size = bo_info.bo_size;
        info->phys_alignment = bo_info.alignment;
        info->virtual_mc_base_address = bo->virtual_mc_base_address;
        info->preferred_heap = bo_info.domains;
        info->alloc_flags = bo_info.domain_flags;
        info->metadata.flags = metadata.data.flags;
        info->metadata.tiling_info = metadata.data.tiling_info;

        info->metadata.size_metadata = metadata.data.data_size_bytes;
        if (metadata.data.data_size_bytes > 0)
                memcpy(info->metadata.umd_metadata, metadata.data.data,
                       metadata.data.data_size_bytes);

        return 0;
}

static void amdgpu_add_handle_to_table(amdgpu_bo_handle bo)
{
        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_handles,
                            (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);
}

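/* Generate a flink name for the buffer. If rendering and flink happen on
 * different file descriptors (bo->dev->flink_fd != bo->dev->fd), the handle
 * is first exported as a dma-buf and re-imported on the flink fd, because a
 * flink name can only be created on the fd that owns the GEM handle. */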
static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
        struct drm_gem_flink flink;
        int fd, dma_fd;
        uint32_t handle;
        int r;

        fd = bo->dev->fd;
        handle = bo->handle;
        if (bo->flink_name)
                return 0;

        if (bo->dev->flink_fd != bo->dev->fd) {
                r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                       &dma_fd);
                if (!r) {
                        r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
                        close(dma_fd);
                }
                if (r)
                        return r;
                fd = bo->dev->flink_fd;
        }
        memset(&flink, 0, sizeof(flink));
        flink.handle = handle;

        r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
        if (r)
                return r;

        bo->flink_name = flink.name;

        if (bo->dev->flink_fd != bo->dev->fd) {
                struct drm_gem_close args = {};
                args.handle = handle;
                drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
        }

        pthread_mutex_lock(&bo->dev->bo_table_mutex);
        util_hash_table_set(bo->dev->bo_flink_names,
                            (void*)(uintptr_t)bo->flink_name,
                            bo);
        pthread_mutex_unlock(&bo->dev->bo_table_mutex);

        return 0;
}

int amdgpu_bo_export(amdgpu_bo_handle bo,
                     enum amdgpu_bo_handle_type type,
                     uint32_t *shared_handle)
{
        int r;

        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                *shared_handle = bo->flink_name;
                return 0;

        case amdgpu_bo_handle_type_kms:
                r = amdgpu_bo_export_flink(bo);
                if (r)
                        return r;

                amdgpu_add_handle_to_table(bo);
                *shared_handle = bo->handle;
                return 0;

        case amdgpu_bo_handle_type_dma_buf_fd:
                amdgpu_add_handle_to_table(bo);
                return drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
                                          (int*)shared_handle);
        }
        return -EINVAL;
}

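/* Usage sketch for amdgpu_bo_export()/amdgpu_bo_import() (illustrative
 * only): share a buffer with another process through a dma-buf fd.
 *
 *      uint32_t fd;
 *
 *      if (!amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &fd))
 *              ... send fd over a UNIX socket; the receiver then calls
 *              amdgpu_bo_import(dev, amdgpu_bo_handle_type_dma_buf_fd,
 *                               fd, &result) ...
 */
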
int amdgpu_bo_import(amdgpu_device_handle dev,
                     enum amdgpu_bo_handle_type type,
                     uint32_t shared_handle,
                     struct amdgpu_bo_import_result *output)
{
        struct drm_gem_open open_arg = {};
        union drm_amdgpu_gem_va va;
        struct amdgpu_bo *bo = NULL;
        int r;
        int dma_fd;
        uint64_t dma_buf_size = 0;

        /* Convert a DMA buf handle to a KMS handle now. */
        if (type == amdgpu_bo_handle_type_dma_buf_fd) {
                uint32_t handle;
                off_t size;

                /* Get a KMS handle. */
                r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
                if (r)
                        return r;

                /* Query the buffer size. */
                size = lseek(shared_handle, 0, SEEK_END);
                if (size == (off_t)-1) {
                        amdgpu_close_kms_handle(dev, handle);
                        return -errno;
                }
                lseek(shared_handle, 0, SEEK_SET);

                dma_buf_size = size;
                shared_handle = handle;
        }

        /* We must maintain a list of pairs <handle, bo>, so that we always
         * return the same amdgpu_bo instance for the same handle. */
        pthread_mutex_lock(&dev->bo_table_mutex);

        /* If we have already created a buffer with this handle, find it. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                bo = util_hash_table_get(dev->bo_flink_names,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo = util_hash_table_get(dev->bo_handles,
                                         (void*)(uintptr_t)shared_handle);
                break;

        case amdgpu_bo_handle_type_kms:
                /* Importing a KMS handle is not allowed. */
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EPERM;

        default:
                pthread_mutex_unlock(&dev->bo_table_mutex);
                return -EINVAL;
        }

        if (bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);

                /* The buffer already exists, just bump the refcount. */
                atomic_inc(&bo->refcount);

                output->buf_handle = bo;
                output->alloc_size = bo->alloc_size;
                output->virtual_mc_base_address =
                        bo->virtual_mc_base_address;
                return 0;
        }

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                if (type == amdgpu_bo_handle_type_dma_buf_fd)
                        amdgpu_close_kms_handle(dev, shared_handle);
                return -ENOMEM;
        }

        /* Open the handle. */
        switch (type) {
        case amdgpu_bo_handle_type_gem_flink_name:
                open_arg.name = shared_handle;
                r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
                if (r) {
                        free(bo);
                        pthread_mutex_unlock(&dev->bo_table_mutex);
                        return r;
                }

                bo->handle = open_arg.handle;
                if (dev->flink_fd != dev->fd) {
                        r = drmPrimeHandleToFD(dev->flink_fd, bo->handle,
                                               DRM_CLOEXEC, &dma_fd);
                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                        r = drmPrimeFDToHandle(dev->fd, dma_fd, &bo->handle);

                        close(dma_fd);

                        if (r) {
                                free(bo);
                                pthread_mutex_unlock(&dev->bo_table_mutex);
                                return r;
                        }
                }
                bo->flink_name = shared_handle;
                bo->alloc_size = open_arg.size;
                util_hash_table_set(dev->bo_flink_names,
                                    (void*)(uintptr_t)bo->flink_name, bo);
                break;

        case amdgpu_bo_handle_type_dma_buf_fd:
                bo->handle = shared_handle;
                bo->alloc_size = dma_buf_size;
                break;

        case amdgpu_bo_handle_type_kms:
                assert(0); /* unreachable */
        }

        /* Initialize it. */
        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        pthread_mutex_init(&bo->cpu_access_mutex, NULL);

        bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
                                                           bo->alloc_size,
                                                           1 << 20);

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                amdgpu_bo_reference(&bo, NULL);
                return -ENOSPC;
        }

        memset(&va, 0, sizeof(va));
        va.in.handle = bo->handle;
        va.in.operation = AMDGPU_VA_OP_MAP;
        va.in.va_address = bo->virtual_mc_base_address;
        va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                pthread_mutex_unlock(&dev->bo_table_mutex);
                /* amdgpu_bo_free_internal() returns the VA range, so don't
                 * free it here as well. */
                amdgpu_bo_reference(&bo, NULL);
                return r ? r : -EINVAL;
        }

        util_hash_table_set(dev->bo_vas,
                            (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
        util_hash_table_set(dev->bo_handles, (void*)(uintptr_t)bo->handle, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);

        output->buf_handle = bo;
        output->alloc_size = bo->alloc_size;
        output->virtual_mc_base_address = bo->virtual_mc_base_address;
        return 0;
}

int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
        /* Just drop the reference. */
        amdgpu_bo_reference(&buf_handle, NULL);
        return 0;
}

int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
        union drm_amdgpu_gem_mmap args;
        void *ptr;
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);

        if (bo->cpu_ptr) {
                /* already mapped */
                assert(bo->cpu_map_count > 0);
                bo->cpu_map_count++;
                *cpu = bo->cpu_ptr;
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        assert(bo->cpu_map_count == 0);

        memset(&args, 0, sizeof(args));

        /* Query the buffer address (args.addr_ptr).
         * The kernel driver ignores the offset and size parameters. */
        args.in.handle = bo->handle;

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
                                sizeof(args));
        if (r) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return r;
        }

        /* Map the buffer. */
        ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       bo->dev->fd, args.out.addr_ptr);
        if (ptr == MAP_FAILED) {
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -errno;
        }

        bo->cpu_ptr = ptr;
        bo->cpu_map_count = 1;
        pthread_mutex_unlock(&bo->cpu_access_mutex);

        *cpu = ptr;
        return 0;
}

int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
        int r;

        pthread_mutex_lock(&bo->cpu_access_mutex);
        assert(bo->cpu_map_count >= 0);

        if (bo->cpu_map_count == 0) {
                /* not mapped */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return -EBADMSG;
        }

        bo->cpu_map_count--;
        if (bo->cpu_map_count > 0) {
                /* mapped multiple times */
                pthread_mutex_unlock(&bo->cpu_access_mutex);
                return 0;
        }

        r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
        bo->cpu_ptr = NULL;
        pthread_mutex_unlock(&bo->cpu_access_mutex);
        return r;
}

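/* Usage sketch for the CPU mapping API (illustrative only). Mappings are
 * reference counted, so every successful amdgpu_bo_cpu_map() must be paired
 * with an amdgpu_bo_cpu_unmap().
 *
 *      void *ptr;
 *
 *      if (!amdgpu_bo_cpu_map(bo, &ptr)) {
 *              memset(ptr, 0, 4096);
 *              amdgpu_bo_cpu_unmap(bo);
 *      }
 */
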
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
                                       struct amdgpu_buffer_size_alignments *info)
{
        info->size_local = dev->dev_info.pte_fragment_size;
        info->size_remote = dev->dev_info.gart_page_size;
        return 0;
}

int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
                            uint64_t timeout_ns,
                            bool *busy)
{
        union drm_amdgpu_gem_wait_idle args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.handle = bo->handle;
        args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

        r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                                &args, sizeof(args));

        if (r == 0) {
                *busy = args.out.status;
                return 0;
        } else {
                fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
                return r;
        }
}

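/* Wrap existing anonymous CPU memory in a buffer object. The address and
 * size handed to the kernel are expanded to page granularity, and the
 * returned virtual_mc_base_address is offset so that it corresponds to the
 * original, possibly unaligned, "cpu" pointer. */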
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu,
                                   uint64_t size,
                                   struct amdgpu_bo_alloc_result *info)
{
        int r;
        struct amdgpu_bo *bo;
        struct drm_amdgpu_gem_userptr args;
        union drm_amdgpu_gem_va va;
        uintptr_t cpu0;
        uint32_t ps, off;

        memset(&args, 0, sizeof(args));
        ps = getpagesize();

        cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
        off = (uintptr_t)cpu - cpu0;
        size = ROUND_UP(size + off, ps);

        args.addr = cpu0;
        args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
        args.size = size;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                                &args, sizeof(args));
        if (r)
                return r;

        bo = calloc(1, sizeof(struct amdgpu_bo));
        if (!bo)
                return -ENOMEM;

        atomic_set(&bo->refcount, 1);
        bo->dev = dev;
        bo->alloc_size = size;
        bo->handle = args.handle;
        bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
                                                           size, 4 * 1024);

        if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
                amdgpu_bo_free_internal(bo);
                return -ENOSPC;
        }

        memset(&va, 0, sizeof(va));
        va.in.handle = bo->handle;
        va.in.operation = AMDGPU_VA_OP_MAP;
        va.in.flags =   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;
        va.in.va_address = bo->virtual_mc_base_address;
        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
        if (r || va.out.result == AMDGPU_VA_RESULT_ERROR) {
                amdgpu_bo_free_internal(bo);
                /* Don't report success if the kernel rejected the mapping. */
                return r ? r : -EINVAL;
        }
        pthread_mutex_lock(&dev->bo_table_mutex);
        util_hash_table_set(dev->bo_vas,
                            (void*)(uintptr_t)bo->virtual_mc_base_address, bo);
        pthread_mutex_unlock(&dev->bo_table_mutex);
        info->buf_handle = bo;
        /* Point the returned address at the original, unaligned "cpu". */
        info->virtual_mc_base_address = bo->virtual_mc_base_address + off;

        return 0;
}

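/* Usage sketch for amdgpu_create_bo_from_user_mem() (illustrative only):
 * expose a malloc'ed staging buffer to the GPU; the memory must be
 * anonymous (AMDGPU_GEM_USERPTR_ANONONLY).
 *
 *      struct amdgpu_bo_alloc_result res;
 *      void *staging = malloc(65536);
 *
 *      if (!amdgpu_create_bo_from_user_mem(dev, staging, 65536, &res))
 *              ... the GPU can access it at res.virtual_mc_base_address ...
 */
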
int amdgpu_bo_list_create(amdgpu_device_handle dev,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios,
                          amdgpu_bo_list_handle *result)
{
        struct drm_amdgpu_bo_list_entry *list;
        union drm_amdgpu_bo_list args;
        unsigned i;
        int r;

        list = alloca(sizeof(struct drm_amdgpu_bo_list_entry) * number_of_resources);

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
        args.in.bo_number = number_of_resources;
        args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
        args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

        for (i = 0; i < number_of_resources; i++) {
                list[i].bo_handle = resources[i]->handle;
                if (resource_prios)
                        list[i].bo_priority = resource_prios[i];
                else
                        list[i].bo_priority = 0;
        }

        r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));
        if (r)
                return r;

        *result = calloc(1, sizeof(struct amdgpu_bo_list));
        if (!*result)
                return -ENOMEM;
        (*result)->dev = dev;
        (*result)->handle = args.out.list_handle;
        return 0;
}

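/* Usage sketch for the BO list API (illustrative only): build a resource
 * list for command submission from two previously allocated buffers.
 *
 *      amdgpu_bo_handle bos[2] = { bo_a, bo_b };
 *      amdgpu_bo_list_handle list;
 *
 *      if (!amdgpu_bo_list_create(dev, 2, bos, NULL, &list)) {
 *              ... reference "list" from the command submission ...
 *              amdgpu_bo_list_destroy(list);
 *      }
 */
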
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
        union drm_amdgpu_bo_list args;
        int r;

        memset(&args, 0, sizeof(args));
        args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
        args.in.list_handle = list->handle;

        r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
                                &args, sizeof(args));

        if (!r)
                free(list);

        return r;
}