anv: check the return value of anv_execbuf_add_bo_bitset()
[platform/upstream/mesa.git] src/intel/vulkan/anv_batch_chain.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include <xf86drm.h>
31
32 #include "anv_private.h"
33 #include "anv_measure.h"
34
35 #include "genxml/gen8_pack.h"
36 #include "genxml/genX_bits.h"
37 #include "perf/intel_perf.h"
38
39 #include "util/u_debug.h"
40 #include "util/perf/u_trace.h"
41
42 /** \file anv_batch_chain.c
43  *
44  * This file contains functions related to anv_cmd_buffer as a data
45  * structure.  This involves everything required to create and destroy
46  * the actual batch buffers as well as link them together.
47  *
48  * It specifically does *not* contain any handling of actual vkCmd calls
49  * beyond vkCmdExecuteCommands.
50  */
51
52 /*-----------------------------------------------------------------------*
53  * Functions related to anv_reloc_list
54  *-----------------------------------------------------------------------*/
55
56 VkResult
57 anv_reloc_list_init(struct anv_reloc_list *list,
58                     const VkAllocationCallbacks *alloc)
59 {
60    memset(list, 0, sizeof(*list));
61    return VK_SUCCESS;
62 }
63
64 static VkResult
65 anv_reloc_list_init_clone(struct anv_reloc_list *list,
66                           const VkAllocationCallbacks *alloc,
67                           const struct anv_reloc_list *other_list)
68 {
69    list->num_relocs = other_list->num_relocs;
70    list->array_length = other_list->array_length;
71
72    if (list->num_relocs > 0) {
73       list->reloc_bos =
74          vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
75                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
76       if (list->reloc_bos == NULL)
77          return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
78
79       memcpy(list->reloc_bos, other_list->reloc_bos,
80              list->array_length * sizeof(*list->reloc_bos));
81    } else {
82       list->reloc_bos = NULL;
83    }
84
85    list->dep_words = other_list->dep_words;
86
87    if (list->dep_words > 0) {
88       list->deps =
89          vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
90                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
91       memcpy(list->deps, other_list->deps,
92              list->dep_words * sizeof(BITSET_WORD));
93    } else {
94       list->deps = NULL;
95    }
96
97    return VK_SUCCESS;
98 }
99
100 void
101 anv_reloc_list_finish(struct anv_reloc_list *list,
102                       const VkAllocationCallbacks *alloc)
103 {
104    vk_free(alloc, list->reloc_bos);
105    vk_free(alloc, list->deps);
106 }
107
108 static VkResult
109 anv_reloc_list_grow(struct anv_reloc_list *list,
110                     const VkAllocationCallbacks *alloc,
111                     size_t num_additional_relocs)
112 {
113    if (list->num_relocs + num_additional_relocs <= list->array_length)
114       return VK_SUCCESS;
115
116    size_t new_length = MAX2(16, list->array_length * 2);
117    while (new_length < list->num_relocs + num_additional_relocs)
118       new_length *= 2;
119
120    struct anv_bo **new_reloc_bos =
121       vk_realloc(alloc, list->reloc_bos,
122                  new_length * sizeof(*list->reloc_bos), 8,
123                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
124    if (new_reloc_bos == NULL)
125       return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
126    list->reloc_bos = new_reloc_bos;
127
128    list->array_length = new_length;
129
130    return VK_SUCCESS;
131 }
132
133 static VkResult
134 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
135                          const VkAllocationCallbacks *alloc,
136                          uint32_t min_num_words)
137 {
138    if (min_num_words <= list->dep_words)
139       return VK_SUCCESS;
140
141    uint32_t new_length = MAX2(32, list->dep_words * 2);
142    while (new_length < min_num_words)
143       new_length *= 2;
144
145    BITSET_WORD *new_deps =
146       vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
147                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
148    if (new_deps == NULL)
149       return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
150    list->deps = new_deps;
151
152    /* Zero out the new data */
153    memset(list->deps + list->dep_words, 0,
154           (new_length - list->dep_words) * sizeof(BITSET_WORD));
155    list->dep_words = new_length;
156
157    return VK_SUCCESS;
158 }
159
160 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
161
162 VkResult
163 anv_reloc_list_add_bo(struct anv_reloc_list *list,
164                       const VkAllocationCallbacks *alloc,
165                       struct anv_bo *target_bo)
166 {
167    uint32_t idx = target_bo->gem_handle;
168    VkResult result = anv_reloc_list_grow_deps(list, alloc,
169                                               (idx / BITSET_WORDBITS) + 1);
170    if (unlikely(result != VK_SUCCESS))
171       return result;
172
173    BITSET_SET(list->deps, idx);
174
175    return VK_SUCCESS;
176 }
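
/* Illustrative sketch (not part of the driver): how a GEM handle maps onto
 * the dependency bitset used above.  With Mesa's util/bitset.h, BITSET_WORD
 * is an unsigned int (typically 32 bits), so a handle is tracked as a single
 * bit:
 *
 *    uint32_t handle = 70;                        // hypothetical GEM handle
 *    uint32_t word   = handle / BITSET_WORDBITS;  // 70 / 32 == 2
 *    uint32_t bit    = handle % BITSET_WORDBITS;  // 70 % 32 == 6
 *    BITSET_SET(list->deps, handle);              // sets bit 6 of deps[2]
 *
 * which is why anv_reloc_list_grow_deps() above only needs
 * (handle / BITSET_WORDBITS) + 1 words to record the new dependency.
 */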
177
178 static void
179 anv_reloc_list_clear(struct anv_reloc_list *list)
180 {
181    list->num_relocs = 0;
182    if (list->dep_words > 0)
183       memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
184 }
185
186 static VkResult
187 anv_reloc_list_append(struct anv_reloc_list *list,
188                       const VkAllocationCallbacks *alloc,
189                       struct anv_reloc_list *other)
190 {
191    VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
192    if (result != VK_SUCCESS)
193       return result;
194
195    if (other->num_relocs > 0) {
196       memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
197              other->num_relocs * sizeof(other->reloc_bos[0]));
198
199       list->num_relocs += other->num_relocs;
200    }
201
202    anv_reloc_list_grow_deps(list, alloc, other->dep_words);
203    for (uint32_t w = 0; w < other->dep_words; w++)
204       list->deps[w] |= other->deps[w];
205
206    return VK_SUCCESS;
207 }
208
209 /*-----------------------------------------------------------------------*
210  * Functions related to anv_batch
211  *-----------------------------------------------------------------------*/
212
213 void *
214 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
215 {
216    if (batch->next + num_dwords * 4 > batch->end) {
217       VkResult result = batch->extend_cb(batch, batch->user_data);
218       if (result != VK_SUCCESS) {
219          anv_batch_set_error(batch, result);
220          return NULL;
221       }
222    }
223
224    void *p = batch->next;
225
226    batch->next += num_dwords * 4;
227    assert(batch->next <= batch->end);
228
229    return p;
230 }
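
/* Illustrative sketch (not part of the driver): callers normally consume the
 * space reserved by anv_batch_emit_dwords() through the anv_batch_emit() /
 * anv_batch_emitn() macros, which boil down to roughly this pattern:
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);   // may grow the batch
 *    if (dw == NULL)
 *       return;             // OOM: the batch has already been flagged as bad
 *    dw[0] = header_dword;  // hypothetical packed command header
 *    dw[1] = payload_dword; // hypothetical payload
 */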
231
232 struct anv_address
233 anv_batch_address(struct anv_batch *batch, void *batch_location)
234 {
235    assert(batch->start <= batch_location);
236
237    /* Allow a jump at the current location of the batch. */
238    assert(batch->next >= batch_location);
239
240    return anv_address_add(batch->start_addr, batch_location - batch->start);
241 }
242
243 void
244 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
245 {
246    uint32_t size = other->next - other->start;
247    assert(size % 4 == 0);
248
249    if (batch->next + size > batch->end) {
250       VkResult result = batch->extend_cb(batch, batch->user_data);
251       if (result != VK_SUCCESS) {
252          anv_batch_set_error(batch, result);
253          return;
254       }
255    }
256
257    assert(batch->next + size <= batch->end);
258
259    VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
260    memcpy(batch->next, other->start, size);
261
262    VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
263                                            other->relocs);
264    if (result != VK_SUCCESS) {
265       anv_batch_set_error(batch, result);
266       return;
267    }
268
269    batch->next += size;
270 }
271
272 /*-----------------------------------------------------------------------*
273  * Functions related to anv_batch_bo
274  *-----------------------------------------------------------------------*/
275
276 static VkResult
277 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
278                     uint32_t size,
279                     struct anv_batch_bo **bbo_out)
280 {
281    VkResult result;
282
283    struct anv_batch_bo *bbo = vk_zalloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
284                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
285    if (bbo == NULL)
286       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
287
288    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
289                               size, &bbo->bo);
290    if (result != VK_SUCCESS)
291       goto fail_alloc;
292
293    result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
294    if (result != VK_SUCCESS)
295       goto fail_bo_alloc;
296
297    *bbo_out = bbo;
298
299    return VK_SUCCESS;
300
301  fail_bo_alloc:
302    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
303  fail_alloc:
304    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
305
306    return result;
307 }
308
309 static VkResult
310 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
311                    const struct anv_batch_bo *other_bbo,
312                    struct anv_batch_bo **bbo_out)
313 {
314    VkResult result;
315
316    struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
317                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
318    if (bbo == NULL)
319       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
320
321    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
322                               other_bbo->bo->size, &bbo->bo);
323    if (result != VK_SUCCESS)
324       goto fail_alloc;
325
326    result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->vk.pool->alloc,
327                                       &other_bbo->relocs);
328    if (result != VK_SUCCESS)
329       goto fail_bo_alloc;
330
331    bbo->length = other_bbo->length;
332    memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
333    *bbo_out = bbo;
334
335    return VK_SUCCESS;
336
337  fail_bo_alloc:
338    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
339  fail_alloc:
340    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
341
342    return result;
343 }
344
345 static void
346 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
347                    size_t batch_padding)
348 {
349    anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
350                          bbo->bo->map, bbo->bo->size - batch_padding);
351    batch->relocs = &bbo->relocs;
352    anv_reloc_list_clear(&bbo->relocs);
353 }
354
355 static void
356 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
357                       size_t batch_padding)
358 {
359    batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
360    batch->start = bbo->bo->map;
361    batch->next = bbo->bo->map + bbo->length;
362    batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
363    batch->relocs = &bbo->relocs;
364 }
365
366 static void
367 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
368 {
369    assert(batch->start == bbo->bo->map);
370    bbo->length = batch->next - batch->start;
371    VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
372 }
373
374 static void
375 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
376                   struct anv_batch_bo *prev_bbo,
377                   struct anv_batch_bo *next_bbo,
378                   uint32_t next_bbo_offset)
379 {
380    const uint32_t bb_start_offset =
381       prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
382    ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
383
384    /* Make sure we're looking at a MI_BATCH_BUFFER_START (MI command, opcode 49 == 0x31) */
385    assert(((*bb_start >> 29) & 0x07) == 0);
386    assert(((*bb_start >> 23) & 0x3f) == 49);
387
388    uint64_t *map = prev_bbo->bo->map + bb_start_offset + 4;
389    *map = intel_canonical_address(next_bbo->bo->offset + next_bbo_offset);
390
391    if (cmd_buffer->device->physical->memory.need_clflush)
392       intel_flush_range(map, sizeof(uint64_t));
393 }
394
395 static void
396 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
397                      struct anv_cmd_buffer *cmd_buffer)
398 {
399    anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
400    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
401    vk_free(&cmd_buffer->vk.pool->alloc, bbo);
402 }
403
404 static VkResult
405 anv_batch_bo_list_clone(const struct list_head *list,
406                         struct anv_cmd_buffer *cmd_buffer,
407                         struct list_head *new_list)
408 {
409    VkResult result = VK_SUCCESS;
410
411    list_inithead(new_list);
412
413    struct anv_batch_bo *prev_bbo = NULL;
414    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
415       struct anv_batch_bo *new_bbo = NULL;
416       result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
417       if (result != VK_SUCCESS)
418          break;
419       list_addtail(&new_bbo->link, new_list);
420
421       if (prev_bbo)
422          anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
423
424       prev_bbo = new_bbo;
425    }
426
427    if (result != VK_SUCCESS) {
428       list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
429          list_del(&bbo->link);
430          anv_batch_bo_destroy(bbo, cmd_buffer);
431       }
432    }
433
434    return result;
435 }
436
437 /*-----------------------------------------------------------------------*
438  * Functions related to anv_batch_bo
439  *-----------------------------------------------------------------------*/
440
441 static struct anv_batch_bo *
442 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
443 {
444    return list_entry(cmd_buffer->batch_bos.prev, struct anv_batch_bo, link);
445 }
446
447 struct anv_address
448 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
449 {
450    struct anv_state_pool *pool = &cmd_buffer->device->binding_table_pool;
451    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
452    return (struct anv_address) {
453       .bo = pool->block_pool.bo,
454       .offset = bt_block->offset - pool->start_offset,
455    };
456 }
457
458 static void
459 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
460                         struct anv_bo *bo, uint32_t offset)
461 {
462    /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
463     * offsets. The high 16 bits are in the last dword, so we can use the gfx8
464     * version in either case, as long as we set the instruction length in the
465     * header accordingly.  This means that we always emit three dwords here
466     * and all the padding and adjustment we do in this file works for all
467     * gens.
468     */
469
470 #define GFX7_MI_BATCH_BUFFER_START_length      2
471 #define GFX7_MI_BATCH_BUFFER_START_length_bias      2
472
473    const uint32_t gfx7_length =
474       GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
475    const uint32_t gfx8_length =
476       GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
477
478    anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
479       bbs.DWordLength               = cmd_buffer->device->info->ver < 8 ?
480                                       gfx7_length : gfx8_length;
481       bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
482       bbs.AddressSpaceIndicator     = ASI_PPGTT;
483       bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
484    }
485 }
486
487 static void
488 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
489                              struct anv_batch_bo *bbo)
490 {
491    struct anv_batch *batch = &cmd_buffer->batch;
492    struct anv_batch_bo *current_bbo =
493       anv_cmd_buffer_current_batch_bo(cmd_buffer);
494
495    /* We set the end of the batch a little short so we would be sure we
496     * have room for the chaining command.  Since we're about to emit the
497     * chaining command, let's set it back where it should go.
498     */
499    batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
500    assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
501
502    emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
503
504    anv_batch_bo_finish(current_bbo, batch);
505 }
506
507 static void
508 anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
509                                    struct anv_cmd_buffer *cmd_buffer_to)
510 {
511    uint32_t *bb_start = cmd_buffer_from->batch_end;
512
513    struct anv_batch_bo *last_bbo =
514       list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
515    struct anv_batch_bo *first_bbo =
516       list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);
517
518    struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
519       __anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
520       .SecondLevelBatchBuffer    = Firstlevelbatch,
521       .AddressSpaceIndicator     = ASI_PPGTT,
522       .BatchBufferStartAddress   = (struct anv_address) { first_bbo->bo, 0 },
523    };
524    struct anv_batch local_batch = {
525       .start  = last_bbo->bo->map,
526       .end    = last_bbo->bo->map + last_bbo->bo->size,
527       .relocs = &last_bbo->relocs,
528       .alloc  = &cmd_buffer_from->vk.pool->alloc,
529    };
530
531    __anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);
532
533    last_bbo->chained = true;
534 }
535
536 static void
537 anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
538 {
539    struct anv_batch_bo *last_bbo =
540       list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
541    last_bbo->chained = false;
542
543    uint32_t *batch = cmd_buffer->batch_end;
544    anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
545                    __anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
546 }
547
548 static VkResult
549 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
550 {
551    struct anv_cmd_buffer *cmd_buffer = _data;
552    struct anv_batch_bo *new_bbo = NULL;
553    /* Cap each new allocation to the maximum chunk size. */
554    uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
555                               ANV_MAX_CMD_BUFFER_BATCH_SIZE);
556
557    VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
558    if (result != VK_SUCCESS)
559       return result;
560
561    cmd_buffer->total_batch_size += alloc_size;
562
563    struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
564    if (seen_bbo == NULL) {
565       anv_batch_bo_destroy(new_bbo, cmd_buffer);
566       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
567    }
568    *seen_bbo = new_bbo;
569
570    cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
571
572    list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
573
574    anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
575
576    return VK_SUCCESS;
577 }
578
579 /** Allocate a binding table
580  *
581  * This function allocates a binding table.  This is a bit more complicated
582  * than one would think due to a combination of Vulkan driver design and some
583  * unfortunate hardware restrictions.
584  *
585  * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
586  * the binding table pointer which means that all binding tables need to live
587  * in the bottom 64k of surface state base address.  The way the GL driver has
588  * classically dealt with this restriction is to emit all surface states
589  * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
590  * isn't really an option in Vulkan for a few reasons:
591  *
592  *  1) In Vulkan, we have growing (or chaining) batches so surface states have
593  *     to live in their own buffer and we have to be able to re-emit
594  *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
595  *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
596  *     (it's not that hard to hit 64k of just binding tables), we allocate
597  *     surface state objects up-front when VkImageView is created.  In order
598  *     for this to work, surface state objects need to be allocated from a
599  *     global buffer.
600  *
601  *  2) We tried to design the surface state system in such a way that it's
602  *     already ready for bindless texturing.  The way bindless texturing works
603  *     on our hardware is that you have a big pool of surface state objects
604  *     (with its own state base address) and the bindless handles are simply
605  *     offsets into that pool.  With the architecture we chose, we already
606  *     have that pool and it's exactly the same pool that we use for regular
607  *     surface states so we should already be ready for bindless.
608  *
609  *  3) For render targets, we need to be able to fill out the surface states
610  *     later in vkBeginRenderPass so that we can assign clear colors
611  *     correctly.  One way to do this would be to just create the surface
612  *     state data and then repeatedly copy it into the surface state BO every
613  *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
614  *     rather annoying, and it's much simpler to just allocate them
615  *     up-front and re-use them for the entire render pass.
616  *
617  * While none of these are technically blockers for emitting state on the fly
618  * like we do in GL, the ability to have a single surface state pool
619  * simplifies things greatly.  Unfortunately, it comes at a cost...
620  *
621  * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
622  * place the binding tables just anywhere in surface state base address.
623  * Because 64k isn't a whole lot of space, we can't simply restrict the
624  * surface state buffer to 64k; we have to be more clever.  The solution we've
625  * chosen is to have a block pool with a maximum size of 2G that starts at
626  * zero and grows in both directions.  All surface states are allocated from
627  * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
628  * binding tables from the bottom of the pool (negative offsets).  Every time
629  * we allocate a new binding table block, we set surface state base address to
630  * point to the bottom of the binding table block.  This way all of the
631  * binding tables in the block are in the bottom 64k of surface state base
632  * address.  When we fill out the binding table, we add the distance between
633  * the bottom of our binding table block and zero of the block pool to the
634  * surface state offsets so that they are correct relative to our new surface
635  * state base address at the bottom of the binding table block.
636  *
637  * \param[in]  entries        The number of surface state entries the binding
638  *                            table should be able to hold.
639  *
640  * \param[out] state_offset   The offset from surface state base address
641  *                            where the surface states live.  This must be
642  *                            added to the surface state offset when it is
643  *                            written into the binding table entry.
644  *
645  * \return                    An anv_state representing the binding table
646  */
647 struct anv_state
648 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
649                                    uint32_t entries, uint32_t *state_offset)
650 {
651    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
652
653    uint32_t bt_size = align_u32(entries * 4, 32);
654
655    struct anv_state state = cmd_buffer->bt_next;
656    if (bt_size > state.alloc_size)
657       return (struct anv_state) { 0 };
658
659    state.alloc_size = bt_size;
660    cmd_buffer->bt_next.offset += bt_size;
661    cmd_buffer->bt_next.map += bt_size;
662    cmd_buffer->bt_next.alloc_size -= bt_size;
663
664    if (cmd_buffer->device->info->verx10 >= 125) {
665       /* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to change the binding
666        * table address independently from surface state base address.  We no
667        * longer need any sort of offsetting.
668        */
669       *state_offset = 0;
670    } else {
671       assert(bt_block->offset < 0);
672       *state_offset = -bt_block->offset;
673    }
674
675    return state;
676 }
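
/* Illustrative example of the offset math described in the comment above for
 * the verx10 < 125 path, with made-up numbers (not taken from the driver):
 * suppose the current binding table block was allocated from the bottom of
 * the pool at bt_block->offset == -4096 and a surface state sits at +8192
 * relative to the block pool's zero point.  Surface state base address points
 * at the bottom of the block (-4096), so the binding table entry must hold
 * the distance from that base:
 *
 *    *state_offset = -bt_block->offset;               // 4096
 *    bt_entry      = surface_state.offset + *state_offset;
 *                                                      // 8192 + 4096 == 12288
 *                                                      // == 8192 - (-4096)
 */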
677
678 struct anv_state
679 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
680 {
681    struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
682    return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
683                                  isl_dev->ss.size, isl_dev->ss.align);
684 }
685
686 struct anv_state
687 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
688                                    uint32_t size, uint32_t alignment)
689 {
690    return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
691                                  size, alignment);
692 }
693
694 /** Allocate space associated with a command buffer
695  *
696  * Some commands like vkCmdBuildAccelerationStructuresKHR() can end up needing
697  * a large amount of temporary buffer space. This function deals with those
698  * potentially larger allocations, using a side BO if needed.
699  *
700  */
701 struct anv_cmd_alloc
702 anv_cmd_buffer_alloc_space(struct anv_cmd_buffer *cmd_buffer,
703                            size_t size, uint32_t alignment)
704 {
705    /* Below 16k, take memory from the dynamic state stream; otherwise allocate a BO. */
706    if (size < 16 * 1024) {
707       struct anv_state state =
708          anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
709                                 size, alignment);
710
711       return (struct anv_cmd_alloc) {
712          .address = (struct anv_address) {
713             .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
714             .offset = state.offset,
715          },
716          .map = state.map,
717          .size = size,
718       };
719    }
720
721    assert(alignment <= 4096);
722
723    struct anv_bo *bo = NULL;
724    VkResult result =
725       anv_device_alloc_bo(cmd_buffer->device,
726                           "cmd-buffer-space",
727                           align_u32(size, 4096),
728                           ANV_BO_ALLOC_MAPPED,
729                           0,
730                           &bo);
731    if (result != VK_SUCCESS) {
732       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_DEVICE_MEMORY);
733       return ANV_EMPTY_ALLOC;
734    }
735
736    struct anv_bo **bo_entry =
737       u_vector_add(&cmd_buffer->dynamic_bos);
738    if (bo_entry == NULL) {
739       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
740       return ANV_EMPTY_ALLOC;
741    }
742    *bo_entry = bo;
743
744    return (struct anv_cmd_alloc) {
745       .address = (struct anv_address) { .bo = bo },
746       .map = bo->map,
747       .size = size,
748    };
749 }
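
/* Illustrative sketch of a hypothetical caller (not from the driver); the
 * emptiness check through .map is an assumption about ANV_EMPTY_ALLOC:
 *
 *    struct anv_cmd_alloc scratch =
 *       anv_cmd_buffer_alloc_space(cmd_buffer, scratch_size, 64);
 *    if (scratch.map == NULL)
 *       return;                        // the batch already carries the error
 *    memcpy(scratch.map, host_data, scratch_size);    // CPU-visible mapping
 *    // scratch.address is the GPU-visible address to reference in commands
 */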
750
751 VkResult
752 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
753 {
754    struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
755    if (bt_block == NULL) {
756       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
757       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
758    }
759
760    *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
761
762    /* The bt_next state is a rolling state (we update it as we suballocate
763     * from it) which is relative to the start of the binding table block.
764     */
765    cmd_buffer->bt_next = *bt_block;
766    cmd_buffer->bt_next.offset = 0;
767
768    return VK_SUCCESS;
769 }
770
771 VkResult
772 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
773 {
774    struct anv_batch_bo *batch_bo = NULL;
775    VkResult result;
776
777    list_inithead(&cmd_buffer->batch_bos);
778
779    cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;
780
781    result = anv_batch_bo_create(cmd_buffer,
782                                 cmd_buffer->total_batch_size,
783                                 &batch_bo);
784    if (result != VK_SUCCESS)
785       return result;
786
787    list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
788
789    cmd_buffer->batch.alloc = &cmd_buffer->vk.pool->alloc;
790    cmd_buffer->batch.user_data = cmd_buffer;
791
792    cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
793
794    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
795                       GFX8_MI_BATCH_BUFFER_START_length * 4);
796
797    int success = u_vector_init_pow2(&cmd_buffer->seen_bbos, 8,
798                                     sizeof(struct anv_bo *));
799    if (!success)
800       goto fail_batch_bo;
801
802    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
803
804    success = u_vector_init(&cmd_buffer->bt_block_states, 8,
805                            sizeof(struct anv_state));
806    if (!success)
807       goto fail_seen_bbos;
808
809    result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
810                                 &cmd_buffer->vk.pool->alloc);
811    if (result != VK_SUCCESS)
812       goto fail_bt_blocks;
813
814    result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
815    if (result != VK_SUCCESS)
816       goto fail_bt_blocks;
817
818    return VK_SUCCESS;
819
820  fail_bt_blocks:
821    u_vector_finish(&cmd_buffer->bt_block_states);
822  fail_seen_bbos:
823    u_vector_finish(&cmd_buffer->seen_bbos);
824  fail_batch_bo:
825    anv_batch_bo_destroy(batch_bo, cmd_buffer);
826
827    return result;
828 }
829
830 void
831 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
832 {
833    struct anv_state *bt_block;
834    u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
835       anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
836    u_vector_finish(&cmd_buffer->bt_block_states);
837
838    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->vk.pool->alloc);
839
840    u_vector_finish(&cmd_buffer->seen_bbos);
841
842    /* Destroy all of the batch buffers */
843    list_for_each_entry_safe(struct anv_batch_bo, bbo,
844                             &cmd_buffer->batch_bos, link) {
845       list_del(&bbo->link);
846       anv_batch_bo_destroy(bbo, cmd_buffer);
847    }
848 }
849
850 void
851 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
852 {
853    /* Delete all but the first batch bo */
854    assert(!list_is_empty(&cmd_buffer->batch_bos));
855    while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
856       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
857       list_del(&bbo->link);
858       anv_batch_bo_destroy(bbo, cmd_buffer);
859    }
860    assert(!list_is_empty(&cmd_buffer->batch_bos));
861
862    anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
863                       &cmd_buffer->batch,
864                       GFX8_MI_BATCH_BUFFER_START_length * 4);
865
866    while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
867       struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
868       anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
869    }
870    assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
871    cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
872    cmd_buffer->bt_next.offset = 0;
873
874    anv_reloc_list_clear(&cmd_buffer->surface_relocs);
875
876    /* Reset the list of seen buffers */
877    cmd_buffer->seen_bbos.head = 0;
878    cmd_buffer->seen_bbos.tail = 0;
879
880    struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
881
882    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;
883
884
885    assert(first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
886    cmd_buffer->total_batch_size = first_bbo->bo->size;
887 }
888
889 void
890 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
891 {
892    struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
893
894    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
895       /* When we start a batch buffer, we subtract a certain amount of
896        * padding from the end to ensure that we always have room to emit a
897        * BATCH_BUFFER_START to chain to the next BO.  We need to remove
898        * that padding before we end the batch; otherwise, we may end up
899        * with our BATCH_BUFFER_END in another BO.
900        */
901       cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
902       assert(cmd_buffer->batch.start == batch_bo->bo->map);
903       assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
904
905       /* Save the location of the end instruction so we can overwrite it later. */
906       cmd_buffer->batch_end = cmd_buffer->batch.next;
907
908       /* If we can chain this command buffer to another one, leave some space
909        * for the jump instruction.
910        */
911       batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
912       if (batch_bo->chained)
913          emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
914       else
915          anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
916
917       /* Round batch up to an even number of dwords. */
918       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
919          anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
920
921       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
922    } else {
923       assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
924       /* If this is a secondary command buffer, we need to determine the
925        * mode in which it will be executed with vkExecuteCommands.  We
926        * determine this statically here so that this stays in sync with the
927        * actual ExecuteCommands implementation.
928        */
929       const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
930       if (cmd_buffer->device->physical->use_call_secondary) {
931          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
932          /* If the secondary command buffer begins & ends in the same BO and
933           * its length is less than the length of the CS prefetch, add some MI_NOOP
934           * instructions so the last MI_BATCH_BUFFER_START is outside the CS
935           * prefetch.
936           */
937          if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
938             const struct intel_device_info *devinfo = cmd_buffer->device->info;
939             const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
940             /* Be careful to keep everything in signed integers. */
941             int32_t prefetch_len = devinfo->engine_class_prefetch[engine_class];
942             int32_t batch_len = cmd_buffer->batch.next - cmd_buffer->batch.start;
943
944             for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
945                anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
946          }
947
948          void *jump_addr =
949             anv_batch_emitn(&cmd_buffer->batch,
950                             GFX8_MI_BATCH_BUFFER_START_length,
951                             GFX8_MI_BATCH_BUFFER_START,
952                             .AddressSpaceIndicator = ASI_PPGTT,
953                             .SecondLevelBatchBuffer = Firstlevelbatch) +
954             (GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
955          cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
956
957          /* The emit above may have caused us to chain batch buffers which
958           * would mean that batch_bo is no longer valid.
959           */
960          batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
961       } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
962                  (length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
963          /* If the secondary has exactly one batch buffer in its list *and*
964           * that batch buffer is less than half of the maximum size, we're
965           * probably better off simply copying it into our batch.
966           */
967          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
968       } else if (!(cmd_buffer->usage_flags &
969                    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
970          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
971
972          /* In order to chain, we need this command buffer to contain an
973           * MI_BATCH_BUFFER_START which will jump back to the calling batch.
974           * It doesn't matter where it points now so long as it has a valid
975           * relocation.  We'll adjust it later as part of the chaining
976           * process.
977           *
978           * We set the end of the batch a little short so we would be sure we
979           * have room for the chaining command.  Since we're about to emit the
980           * chaining command, let's set it back where it should go.
981           */
982          cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
983          assert(cmd_buffer->batch.start == batch_bo->bo->map);
984          assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
985
986          emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
987          assert(cmd_buffer->batch.start == batch_bo->bo->map);
988       } else {
989          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
990       }
991    }
992
993    anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
994 }
995
996 static VkResult
997 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
998                              struct list_head *list)
999 {
1000    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1001       struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1002       if (bbo_ptr == NULL)
1003          return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
1004
1005       *bbo_ptr = bbo;
1006    }
1007
1008    return VK_SUCCESS;
1009 }
1010
1011 void
1012 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1013                              struct anv_cmd_buffer *secondary)
1014 {
1015    anv_measure_add_secondary(primary, secondary);
1016    switch (secondary->exec_mode) {
1017    case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1018       anv_batch_emit_batch(&primary->batch, &secondary->batch);
1019       break;
1020    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1021       struct anv_batch_bo *first_bbo =
1022          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1023       struct anv_batch_bo *last_bbo =
1024          list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1025
1026       emit_batch_buffer_start(primary, first_bbo->bo, 0);
1027
1028       struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1029       assert(primary->batch.start == this_bbo->bo->map);
1030       uint32_t offset = primary->batch.next - primary->batch.start;
1031
1032       /* Make the tail of the secondary point back to right after the
1033        * MI_BATCH_BUFFER_START in the primary batch.
1034        */
1035       anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1036
1037       anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1038       break;
1039    }
1040    case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1041       struct list_head copy_list;
1042       VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1043                                                 secondary,
1044                                                 &copy_list);
1045       if (result != VK_SUCCESS)
1046          return; /* FIXME */
1047
1048       anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1049
1050       struct anv_batch_bo *first_bbo =
1051          list_first_entry(&copy_list, struct anv_batch_bo, link);
1052       struct anv_batch_bo *last_bbo =
1053          list_last_entry(&copy_list, struct anv_batch_bo, link);
1054
1055       cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1056
1057       list_splicetail(&copy_list, &primary->batch_bos);
1058
1059       anv_batch_bo_continue(last_bbo, &primary->batch,
1060                             GFX8_MI_BATCH_BUFFER_START_length * 4);
1061       break;
1062    }
1063    case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1064       struct anv_batch_bo *first_bbo =
1065          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1066
1067       uint64_t *write_return_addr =
1068          anv_batch_emitn(&primary->batch,
1069                          GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1070                          GFX8_MI_STORE_DATA_IMM,
1071                          .Address = secondary->return_addr)
1072          + (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1073
1074       emit_batch_buffer_start(primary, first_bbo->bo, 0);
1075
1076       *write_return_addr =
1077          anv_address_physical(anv_batch_address(&primary->batch,
1078                                                 primary->batch.next));
1079
1080       anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1081       break;
1082    }
1083    default:
1084       assert(!"Invalid execution mode");
1085    }
1086
1087    anv_reloc_list_append(&primary->surface_relocs, &primary->vk.pool->alloc,
1088                          &secondary->surface_relocs);
1089 }
1090
1091 struct anv_execbuf {
1092    struct drm_i915_gem_execbuffer2           execbuf;
1093
1094    struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1095
1096    struct drm_i915_gem_exec_object2 *        objects;
1097    uint32_t                                  bo_count;
1098    struct anv_bo **                          bos;
1099
1100    /* Allocated length of the 'objects' and 'bos' arrays */
1101    uint32_t                                  array_length;
1102
1103    uint32_t                                  syncobj_count;
1104    uint32_t                                  syncobj_array_length;
1105    struct drm_i915_gem_exec_fence *          syncobjs;
1106    uint64_t *                                syncobj_values;
1107
1108    /* List of relocations for surface states, only used on platforms not
1109     * using softpin.
1110     */
1111    void *                                    surface_states_relocs;
1112
1113    uint32_t                                  cmd_buffer_count;
1114    struct anv_query_pool                     *perf_query_pool;
1115
1116    const VkAllocationCallbacks *             alloc;
1117    VkSystemAllocationScope                   alloc_scope;
1118
1119    int                                       perf_query_pass;
1120 };
1121
1122 static void
1123 anv_execbuf_finish(struct anv_execbuf *exec)
1124 {
1125    vk_free(exec->alloc, exec->syncobjs);
1126    vk_free(exec->alloc, exec->syncobj_values);
1127    vk_free(exec->alloc, exec->surface_states_relocs);
1128    vk_free(exec->alloc, exec->objects);
1129    vk_free(exec->alloc, exec->bos);
1130 }
1131
1132 static void
1133 anv_execbuf_add_ext(struct anv_execbuf *exec,
1134                     uint32_t ext_name,
1135                     struct i915_user_extension *ext)
1136 {
1137    __u64 *iter = &exec->execbuf.cliprects_ptr;
1138
1139    exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1140
1141    while (*iter != 0) {
1142       iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1143    }
1144
1145    ext->name = ext_name;
1146
1147    *iter = (uintptr_t) ext;
1148 }
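
/* Illustrative sketch (not part of the driver): the timeline_fences member of
 * struct anv_execbuf above is the kind of extension this helper chains.  A
 * submit path could hook it up roughly like this (struct field names come
 * from the i915 uapi; the usage here is only a sketch):
 *
 *    execbuf->timeline_fences.fence_count = execbuf->syncobj_count;
 *    execbuf->timeline_fences.handles_ptr = (uintptr_t) execbuf->syncobjs;
 *    execbuf->timeline_fences.values_ptr  = (uintptr_t) execbuf->syncobj_values;
 *    anv_execbuf_add_ext(execbuf,
 *                        DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *                        &execbuf->timeline_fences.base);
 *
 * The kernel walks the singly linked extension list rooted at cliprects_ptr
 * because I915_EXEC_USE_EXTENSIONS is set above.
 */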
1149
1150 static VkResult
1151 anv_execbuf_add_bo_bitset(struct anv_device *device,
1152                           struct anv_execbuf *exec,
1153                           uint32_t dep_words,
1154                           BITSET_WORD *deps,
1155                           uint32_t extra_flags);
1156
1157 static VkResult
1158 anv_execbuf_add_bo(struct anv_device *device,
1159                    struct anv_execbuf *exec,
1160                    struct anv_bo *bo,
1161                    struct anv_reloc_list *relocs,
1162                    uint32_t extra_flags)
1163 {
1164    struct drm_i915_gem_exec_object2 *obj = NULL;
1165
1166    if (bo->exec_obj_index < exec->bo_count &&
1167        exec->bos[bo->exec_obj_index] == bo)
1168       obj = &exec->objects[bo->exec_obj_index];
1169
1170    if (obj == NULL) {
1171       /* We've never seen this one before.  Add it to the list and assign
1172        * an id that we can use later.
1173        */
1174       if (exec->bo_count >= exec->array_length) {
1175          uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1176
1177          struct drm_i915_gem_exec_object2 *new_objects =
1178             vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1179          if (new_objects == NULL)
1180             return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1181
1182          struct anv_bo **new_bos =
1183             vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1184          if (new_bos == NULL) {
1185             vk_free(exec->alloc, new_objects);
1186             return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1187          }
1188
1189          if (exec->objects) {
1190             memcpy(new_objects, exec->objects,
1191                    exec->bo_count * sizeof(*new_objects));
1192             memcpy(new_bos, exec->bos,
1193                    exec->bo_count * sizeof(*new_bos));
1194          }
1195
1196          vk_free(exec->alloc, exec->objects);
1197          vk_free(exec->alloc, exec->bos);
1198
1199          exec->objects = new_objects;
1200          exec->bos = new_bos;
1201          exec->array_length = new_len;
1202       }
1203
1204       assert(exec->bo_count < exec->array_length);
1205
1206       bo->exec_obj_index = exec->bo_count++;
1207       obj = &exec->objects[bo->exec_obj_index];
1208       exec->bos[bo->exec_obj_index] = bo;
1209
1210       obj->handle = bo->gem_handle;
1211       obj->relocation_count = 0;
1212       obj->relocs_ptr = 0;
1213       obj->alignment = 0;
1214       obj->offset = bo->offset;
1215       obj->flags = bo->flags | extra_flags;
1216       obj->rsvd1 = 0;
1217       obj->rsvd2 = 0;
1218    }
1219
1220    if (extra_flags & EXEC_OBJECT_WRITE) {
1221       obj->flags |= EXEC_OBJECT_WRITE;
1222       obj->flags &= ~EXEC_OBJECT_ASYNC;
1223    }
1224
1225    if (relocs != NULL) {
1226       for (size_t i = 0; i < relocs->num_relocs; i++) {
1227          VkResult result =
1228             anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1229                                NULL, extra_flags);
1230          if (result != VK_SUCCESS)
1231             return result;
1232       }
1233
1234       return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1235                                        relocs->deps, extra_flags);
1236    }
1237
1238    return VK_SUCCESS;
1239 }
1240
1241 /* Add BO dependencies to execbuf */
1242 static VkResult
1243 anv_execbuf_add_bo_bitset(struct anv_device *device,
1244                           struct anv_execbuf *exec,
1245                           uint32_t dep_words,
1246                           BITSET_WORD *deps,
1247                           uint32_t extra_flags)
1248 {
1249    for (uint32_t w = 0; w < dep_words; w++) {
1250       BITSET_WORD mask = deps[w];
1251       while (mask) {
1252          int i = u_bit_scan(&mask);
1253          uint32_t gem_handle = w * BITSET_WORDBITS + i;
1254          struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1255          assert(bo->refcount > 0);
1256          VkResult result =
1257             anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1258          if (result != VK_SUCCESS)
1259             return result;
1260       }
1261    }
1262
1263    return VK_SUCCESS;
1264 }
1265
1266 static VkResult
1267 anv_execbuf_add_syncobj(struct anv_device *device,
1268                         struct anv_execbuf *exec,
1269                         uint32_t syncobj,
1270                         uint32_t flags,
1271                         uint64_t timeline_value)
1272 {
1273    if (exec->syncobj_count >= exec->syncobj_array_length) {
1274       uint32_t new_len = MAX2(exec->syncobj_array_length * 2, 16);
1275
1276       struct drm_i915_gem_exec_fence *new_syncobjs =
1277          vk_alloc(exec->alloc, new_len * sizeof(*new_syncobjs),
1278                   8, exec->alloc_scope);
1279       if (!new_syncobjs)
1280          return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1281
1282       if (exec->syncobjs)
1283          typed_memcpy(new_syncobjs, exec->syncobjs, exec->syncobj_count);
1284
1285       exec->syncobjs = new_syncobjs;
1286
1287       if (exec->syncobj_values) {
1288          uint64_t *new_syncobj_values =
1289             vk_alloc(exec->alloc, new_len * sizeof(*new_syncobj_values),
1290                      8, exec->alloc_scope);
1291          if (!new_syncobj_values)
1292             return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1293
1294          typed_memcpy(new_syncobj_values, exec->syncobj_values,
1295                       exec->syncobj_count);
1296
1297          exec->syncobj_values = new_syncobj_values;
1298       }
1299
1300       exec->syncobj_array_length = new_len;
1301    }
1302
1303    if (timeline_value && !exec->syncobj_values) {
1304       exec->syncobj_values =
1305          vk_zalloc(exec->alloc, exec->syncobj_array_length *
1306                                 sizeof(*exec->syncobj_values),
1307                    8, exec->alloc_scope);
1308       if (!exec->syncobj_values)
1309          return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1310    }
1311
1312    exec->syncobjs[exec->syncobj_count] = (struct drm_i915_gem_exec_fence) {
1313       .handle = syncobj,
1314       .flags = flags,
1315    };
1316    if (exec->syncobj_values)
1317       exec->syncobj_values[exec->syncobj_count] = timeline_value;
1318
1319    exec->syncobj_count++;
1320
1321    return VK_SUCCESS;
1322 }
1323
1324 static VkResult
1325 anv_execbuf_add_sync(struct anv_device *device,
1326                      struct anv_execbuf *execbuf,
1327                      struct vk_sync *sync,
1328                      bool is_signal,
1329                      uint64_t value)
1330 {
1331    /* It's illegal to signal a timeline with value 0 because that's never
1332     * higher than the current value.  A timeline wait on value 0 is always
1333     * trivial because every uint64_t value is >= 0.
1334     */
1335    if ((sync->flags & VK_SYNC_IS_TIMELINE) && value == 0)
1336       return VK_SUCCESS;
1337
1338    if (vk_sync_is_anv_bo_sync(sync)) {
1339       struct anv_bo_sync *bo_sync =
1340          container_of(sync, struct anv_bo_sync, sync);
1341
1342       assert(is_signal == (bo_sync->state == ANV_BO_SYNC_STATE_RESET));
1343
1344       return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
1345                                 is_signal ? EXEC_OBJECT_WRITE : 0);
1346    } else if (vk_sync_type_is_drm_syncobj(sync->type)) {
1347       struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(sync);
1348
1349       if (!(sync->flags & VK_SYNC_IS_TIMELINE))
1350          value = 0;
1351
1352       return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
1353                                      is_signal ? I915_EXEC_FENCE_SIGNAL :
1354                                                  I915_EXEC_FENCE_WAIT,
1355                                      value);
1356    }
1357
1358    unreachable("Invalid sync type");
1359 }
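
/* Illustrative sketch (not part of the driver): a submit path would typically
 * run every wait and signal of a submission through the helper above; the
 * "submit" fields used here are hypothetical:
 *
 *    for (uint32_t i = 0; i < submit->wait_count; i++) {
 *       result = anv_execbuf_add_sync(device, execbuf,
 *                                     submit->waits[i].sync,
 *                                     false,          // is_signal
 *                                     submit->waits[i].wait_value);
 *       if (result != VK_SUCCESS)
 *          return result;
 *    }
 */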
1360
1361 static VkResult
1362 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1363                              struct anv_cmd_buffer *cmd_buffer)
1364 {
1365    VkResult result;
1366    /* Add surface dependencies (BOs) to the execbuf */
1367    result = anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1368                                       cmd_buffer->surface_relocs.dep_words,
1369                                       cmd_buffer->surface_relocs.deps, 0);
1370    if (result != VK_SUCCESS)
1371       return result;
1372
1373    /* First, we walk over all of the bos we've seen and add them and their
1374     * relocations to the validate list.
1375     */
1376    struct anv_batch_bo **bbo;
1377    u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1378       result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1379                                   (*bbo)->bo, &(*bbo)->relocs, 0);
1380       if (result != VK_SUCCESS)
1381          return result;
1382    }
1383
1384    struct anv_bo **bo_entry;
1385    u_vector_foreach(bo_entry, &cmd_buffer->dynamic_bos) {
1386       result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1387                                   *bo_entry, NULL, 0);
1388       if (result != VK_SUCCESS)
1389          return result;
1390    }
1391
1392    return VK_SUCCESS;
1393 }
1394
1395 static void
1396 chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
1397                       uint32_t num_cmd_buffers)
1398 {
1399    if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
1400       assert(num_cmd_buffers == 1);
1401       return;
1402    }
1403
1404    /* Chain the first N-1 batch buffers together */
1405    for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
1406       anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);
1407
1408    /* Put an end to the last one */
1409    anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
1410 }
1411
1412 static VkResult
1413 pin_state_pool(struct anv_device *device,
1414                struct anv_execbuf *execbuf,
1415                struct anv_state_pool *pool)
1416 {
1417    anv_block_pool_foreach_bo(bo, &pool->block_pool) {
1418       VkResult result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1419       if (result != VK_SUCCESS)
1420          return result;
1421    }
1422
1423    return VK_SUCCESS;
1424 }
1425
1426 static VkResult
1427 setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
1428                               struct anv_queue *queue,
1429                               struct anv_cmd_buffer **cmd_buffers,
1430                               uint32_t num_cmd_buffers)
1431 {
1432    struct anv_device *device = queue->device;
1433    VkResult result;
1434
1435    /* Edit the tail of the command buffers to chain them all together if they
1436     * can be.
1437     */
1438    chain_command_buffers(cmd_buffers, num_cmd_buffers);
1439
1440    for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1441       anv_measure_submit(cmd_buffers[i]);
1442       result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
1443       if (result != VK_SUCCESS)
1444          return result;
1445    }
1446
1447    /* Add all the global BOs to the object list for the softpin case. */
1448    result = pin_state_pool(device, execbuf, &device->scratch_surface_state_pool);
1449    if (result != VK_SUCCESS)
1450       return result;
1451
1452    result = pin_state_pool(device, execbuf, &device->bindless_surface_state_pool);
1453    if (result != VK_SUCCESS)
1454       return result;
1455
1456    result = pin_state_pool(device, execbuf, &device->internal_surface_state_pool);
1457    if (result != VK_SUCCESS)
1458       return result;
1459
1460    result = pin_state_pool(device, execbuf, &device->dynamic_state_pool);
1461    if (result != VK_SUCCESS)
1462       return result;
1463
1464    result = pin_state_pool(device, execbuf, &device->general_state_pool);
1465    if (result != VK_SUCCESS)
1466       return result;
1467
1468    result = pin_state_pool(device, execbuf, &device->instruction_state_pool);
1469    if (result != VK_SUCCESS)
1470       return result;
1471
1472    result = pin_state_pool(device, execbuf, &device->binding_table_pool);
1473    if (result != VK_SUCCESS)
1474       return result;
1475
1476    /* Add the BOs for all user-allocated memory objects because we can't
1477     * track their use after the binding updates of VK_EXT_descriptor_indexing.
1478     */
1479    list_for_each_entry(struct anv_device_memory, mem,
1480                        &device->memory_objects, link) {
1481       result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
1482       if (result != VK_SUCCESS)
1483          return result;
1484    }
1485
1486    for (uint32_t i = 0; i < execbuf->bo_count; i++)
1487       execbuf->objects[i].offset = execbuf->bos[i]->offset;
1488
1489    struct anv_batch_bo *first_batch_bo =
1490       list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);
1491
1492    /* The kernel requires that the last entry in the validation list be the
1493     * batch buffer to execute.  We can simply swap the element
1494     * corresponding to the first batch_bo in the chain with the last
1495     * element in the list.
1496     */
1497    if (first_batch_bo->bo->exec_obj_index != execbuf->bo_count - 1) {
1498       uint32_t idx = first_batch_bo->bo->exec_obj_index;
1499       uint32_t last_idx = execbuf->bo_count - 1;
1500
1501       struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1502       assert(execbuf->bos[idx] == first_batch_bo->bo);
1503
1504       execbuf->objects[idx] = execbuf->objects[last_idx];
1505       execbuf->bos[idx] = execbuf->bos[last_idx];
1506       execbuf->bos[idx]->exec_obj_index = idx;
1507
1508       execbuf->objects[last_idx] = tmp_obj;
1509       execbuf->bos[last_idx] = first_batch_bo->bo;
1510       first_batch_bo->bo->exec_obj_index = last_idx;
1511    }
1512
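   /* If the batch BO mappings aren't coherent with the CPU cache, flush the
    * written batch contents before the kernel hands them to the GPU.
    */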
1513    if (device->physical->memory.need_clflush) {
1514       __builtin_ia32_mfence();
1515       struct anv_batch_bo **bbo;
1516       for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1517          u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
1518             for (uint32_t l = 0; l < (*bbo)->length; l += CACHELINE_SIZE)
1519                __builtin_ia32_clflush((*bbo)->bo->map + l);
1520          }
1521       }
1522    }
1523
1524    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1525       .buffers_ptr = (uintptr_t) execbuf->objects,
1526       .buffer_count = execbuf->bo_count,
1527       .batch_start_offset = 0,
1528       /* We'll fill in batch length later when chaining batches. */
1529       .batch_len = 0,
1530       .cliprects_ptr = 0,
1531       .num_cliprects = 0,
1532       .DR1 = 0,
1533       .DR4 = 0,
1534       .flags = I915_EXEC_NO_RELOC |
1535                I915_EXEC_HANDLE_LUT |
1536                queue->exec_flags,
1537       .rsvd1 = device->context_id,
1538       .rsvd2 = 0,
1539    };
1540
1541    return VK_SUCCESS;
1542 }
1543
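/* Build an execbuf that only runs the device's trivial batch
 * (MI_BATCH_BUFFER_END plus a NOOP), used when a submission carries no
 * command buffers but still has syncs to wait on or signal.
 */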
1544 static VkResult
1545 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
1546 {
1547    struct anv_device *device = queue->device;
1548    VkResult result = anv_execbuf_add_bo(device, execbuf,
1549                                         device->trivial_batch_bo,
1550                                         NULL, 0);
1551    if (result != VK_SUCCESS)
1552       return result;
1553
1554    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1555       .buffers_ptr = (uintptr_t) execbuf->objects,
1556       .buffer_count = execbuf->bo_count,
1557       .batch_start_offset = 0,
1558       .batch_len = 8, /* GFX7_MI_BATCH_BUFFER_END and NOOP */
1559       .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
1560       .rsvd1 = device->context_id,
1561       .rsvd2 = 0,
1562    };
1563
1564    return VK_SUCCESS;
1565 }
1566
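/* Build an execbuf for the utrace flush/copy batch and attach a syncobj
 * that gets signaled once the copy has completed.
 */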
1567 static VkResult
1568 setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
1569                      struct anv_utrace_flush_copy *flush)
1570 {
1571    struct anv_device *device = queue->device;
1572    VkResult result = anv_execbuf_add_bo(device, execbuf,
1573                                         flush->batch_bo,
1574                                         &flush->relocs, 0);
1575    if (result != VK_SUCCESS)
1576       return result;
1577
1578    result = anv_execbuf_add_sync(device, execbuf, flush->sync,
1579                                  true /* is_signal */, 0 /* value */);
1580    if (result != VK_SUCCESS)
1581       return result;
1582
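   /* As in setup_execbuf_for_cmd_buffers(), the kernel requires the batch
    * buffer to be the last entry in the validation list, so swap it into
    * place if needed.
    */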
1583    if (flush->batch_bo->exec_obj_index != execbuf->bo_count - 1) {
1584       uint32_t idx = flush->batch_bo->exec_obj_index;
1585       uint32_t last_idx = execbuf->bo_count - 1;
1586
1587       struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1588       assert(execbuf->bos[idx] == flush->batch_bo);
1589
1590       execbuf->objects[idx] = execbuf->objects[last_idx];
1591       execbuf->bos[idx] = execbuf->bos[last_idx];
1592       execbuf->bos[idx]->exec_obj_index = idx;
1593
1594       execbuf->objects[last_idx] = tmp_obj;
1595       execbuf->bos[last_idx] = flush->batch_bo;
1596       flush->batch_bo->exec_obj_index = last_idx;
1597    }
1598
1599    if (device->physical->memory.need_clflush)
1600       intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);
1601
1602    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1603       .buffers_ptr = (uintptr_t) execbuf->objects,
1604       .buffer_count = execbuf->bo_count,
1605       .batch_start_offset = 0,
1606       .batch_len = flush->batch.next - flush->batch.start,
1607       .flags = I915_EXEC_NO_RELOC |
1608                I915_EXEC_HANDLE_LUT |
1609                I915_EXEC_FENCE_ARRAY |
1610                queue->exec_flags,
1611       .rsvd1 = device->context_id,
1612       .rsvd2 = 0,
1613       .num_cliprects = execbuf->syncobj_count,
1614       .cliprects_ptr = (uintptr_t)execbuf->syncobjs,
1615    };
1616
1617    return VK_SUCCESS;
1618 }
1619
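/* Submit the utrace flush/copy batch with its own execbuf.  As the "_locked"
 * suffix indicates, this runs with the device mutex held.
 */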
1620 static VkResult
1621 anv_queue_exec_utrace_locked(struct anv_queue *queue,
1622                              struct anv_utrace_flush_copy *flush)
1623 {
1624    assert(flush->batch_bo);
1625
1626    struct anv_device *device = queue->device;
1627    struct anv_execbuf execbuf = {
1628       .alloc = &device->vk.alloc,
1629       .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
1630    };
1631
1632    VkResult result = setup_utrace_execbuf(&execbuf, queue, flush);
1633    if (result != VK_SUCCESS)
1634       goto error;
1635
1636    int ret = queue->device->info->no_hw ? 0 :
1637       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
1638    if (ret)
1639       result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
1640
1641  error:
1642    anv_execbuf_finish(&execbuf);
1643
1644    return result;
1645 }
1646
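/* When batch debugging is enabled, decode and print the batches about to be
 * submitted (including the perf query pass preamble, if any) to stderr.
 */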
1647 static void
1648 anv_exec_batch_debug(struct anv_queue *queue, uint32_t cmd_buffer_count,
1649                      struct anv_cmd_buffer **cmd_buffers,
1650                      struct anv_query_pool *perf_query_pool,
1651                      uint32_t perf_query_pass)
1652 {
1653    if (!INTEL_DEBUG(DEBUG_BATCH))
1654       return;
1655
1656    struct anv_device *device = queue->device;
1657    const bool has_perf_query = perf_query_pool && perf_query_pass >= 0 &&
1658                                cmd_buffer_count;
1659
1660    fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
1661    if (cmd_buffer_count) {
1662       if (has_perf_query) {
1663          struct anv_bo *pass_batch_bo = perf_query_pool->bo;
1664          uint64_t pass_batch_offset =
1665             khr_perf_query_preamble_offset(perf_query_pool, perf_query_pass);
1666
1667          intel_print_batch(&device->decoder_ctx,
1668                            pass_batch_bo->map + pass_batch_offset, 64,
1669                            pass_batch_bo->offset + pass_batch_offset, false);
1670       }
1671
1672       for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1673          struct anv_batch_bo **bo = u_vector_tail(&cmd_buffers[i]->seen_bbos);
1674          device->cmd_buffer_being_decoded = cmd_buffers[i];
1675          intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
1676                            (*bo)->bo->size, (*bo)->bo->offset, false);
1677          device->cmd_buffer_being_decoded = NULL;
1678       }
1679    } else {
1680       intel_print_batch(&device->decoder_ctx, device->trivial_batch_bo->map,
1681                         device->trivial_batch_bo->size,
1682                         device->trivial_batch_bo->offset, false);
1683    }
1684 }
1685
1686 /* We lock around execbuf for two main reasons:
1687  *
1688  *  1) When a block pool is resized, we create a new gem handle with a
1689  *     different size and, in the case of surface states, possibly a different
1690  *     center offset but we re-use the same anv_bo struct when we do so. If
1691  *     this happens in the middle of setting up an execbuf, we could end up
1692  *     with our list of BOs out of sync with our list of gem handles.
1693  *
1694  *  2) The algorithm we use for building the list of unique buffers isn't
1695  *     thread-safe. While the client is supposed to synchronize around
1696  *     QueueSubmit, this would be extremely difficult to debug if it ever came
1697  *     up in the wild due to a broken app. It's better to play it safe and
1698  *     just lock around QueueSubmit.
1699  *
1700  * Since the only other things that ever take the device lock, such as block
1701  * pool resizes, happen only rarely, this will almost never be contended, so
1702  * taking a lock isn't really an expensive operation in this case.
1703  */
1704 static VkResult
1705 anv_queue_exec_locked(struct anv_queue *queue,
1706                       uint32_t wait_count,
1707                       const struct vk_sync_wait *waits,
1708                       uint32_t cmd_buffer_count,
1709                       struct anv_cmd_buffer **cmd_buffers,
1710                       uint32_t signal_count,
1711                       const struct vk_sync_signal *signals,
1712                       struct anv_query_pool *perf_query_pool,
1713                       uint32_t perf_query_pass)
1714 {
1715    struct anv_device *device = queue->device;
1716    struct anv_utrace_flush_copy *utrace_flush_data = NULL;
1717    struct anv_execbuf execbuf = {
1718       .alloc = &queue->device->vk.alloc,
1719       .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
1720       .perf_query_pass = perf_query_pass,
1721    };
1722
1723    /* Flush the trace points first; they need to be moved */
1724    VkResult result =
1725       anv_device_utrace_flush_cmd_buffers(queue,
1726                                           cmd_buffer_count,
1727                                           cmd_buffers,
1728                                           &utrace_flush_data);
1729    if (result != VK_SUCCESS)
1730       goto error;
1731
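   /* If the utrace flush doesn't need its own copy batch, just have this
    * submission signal its sync directly.
    */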
1732    if (utrace_flush_data && !utrace_flush_data->batch_bo) {
1733       result = anv_execbuf_add_sync(device, &execbuf,
1734                                     utrace_flush_data->sync,
1735                                     true /* is_signal */,
1736                                     0);
1737       if (result != VK_SUCCESS)
1738          goto error;
1739
1740       utrace_flush_data = NULL;
1741    }
1742
1743    /* Always add the workaround BO as it includes a driver identifier for the
1744     * error_state.
1745     */
1746    result =
1747       anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
1748    if (result != VK_SUCCESS)
1749       goto error;
1750
1751    for (uint32_t i = 0; i < wait_count; i++) {
1752       result = anv_execbuf_add_sync(device, &execbuf,
1753                                     waits[i].sync,
1754                                     false /* is_signal */,
1755                                     waits[i].wait_value);
1756       if (result != VK_SUCCESS)
1757          goto error;
1758    }
1759
1760    for (uint32_t i = 0; i < signal_count; i++) {
1761       result = anv_execbuf_add_sync(device, &execbuf,
1762                                     signals[i].sync,
1763                                     true /* is_signal */,
1764                                     signals[i].signal_value);
1765       if (result != VK_SUCCESS)
1766          goto error;
1767    }
1768
1769    if (queue->sync) {
1770       result = anv_execbuf_add_sync(device, &execbuf,
1771                                     queue->sync,
1772                                     true /* is_signal */,
1773                                     0 /* signal_value */);
1774       if (result != VK_SUCCESS)
1775          goto error;
1776    }
1777
1778    if (cmd_buffer_count) {
1779       result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
1780                                              cmd_buffers,
1781                                              cmd_buffer_count);
1782    } else {
1783       result = setup_empty_execbuf(&execbuf, queue);
1784    }
1785
1786    if (result != VK_SUCCESS)
1787       goto error;
1788
1789    const bool has_perf_query =
1790       perf_query_pool && perf_query_pass >= 0 && cmd_buffer_count;
1791
1792    if (INTEL_DEBUG(DEBUG_SUBMIT)) {
1793       fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
1794               execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
1795       for (uint32_t i = 0; i < execbuf.bo_count; i++) {
1796          const struct anv_bo *bo = execbuf.bos[i];
1797
1798          fprintf(stderr, "   BO: addr=0x%016"PRIx64"-0x%016"PRIx64" size=0x%010"PRIx64
1799                  " handle=%05u capture=%u name=%s\n",
1800                  bo->offset, bo->offset + bo->size - 1, bo->size, bo->gem_handle,
1801                  (bo->flags & EXEC_OBJECT_CAPTURE) != 0, bo->name);
1802       }
1803    }
1804
1805    anv_exec_batch_debug(queue, cmd_buffer_count, cmd_buffers, perf_query_pool,
1806                         perf_query_pass);
1807
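   /* Hand the collected syncobjs to the kernel: use the timeline fences
    * extension when any of them carry values, otherwise fall back to the
    * legacy fence array, which reuses the cliprects fields.
    */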
1808    if (execbuf.syncobj_values) {
1809       execbuf.timeline_fences.fence_count = execbuf.syncobj_count;
1810       execbuf.timeline_fences.handles_ptr = (uintptr_t)execbuf.syncobjs;
1811       execbuf.timeline_fences.values_ptr = (uintptr_t)execbuf.syncobj_values;
1812       anv_execbuf_add_ext(&execbuf,
1813                           DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
1814                           &execbuf.timeline_fences.base);
1815    } else if (execbuf.syncobjs) {
1816       execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1817       execbuf.execbuf.num_cliprects = execbuf.syncobj_count;
1818       execbuf.execbuf.cliprects_ptr = (uintptr_t)execbuf.syncobjs;
1819    }
1820
1821    if (has_perf_query) {
1822       assert(perf_query_pass < perf_query_pool->n_passes);
1823       struct intel_perf_query_info *query_info =
1824          perf_query_pool->pass_query[perf_query_pass];
1825
1826       /* Some performance queries only need the pipeline statistics HW, not
1827        * OA, in which case there is no need to reconfigure.
1828        */
1829       if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
1830           (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
1831            query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
1832          int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
1833                                (void *)(uintptr_t) query_info->oa_metrics_set_id);
1834          if (ret < 0) {
1835             result = vk_device_set_lost(&device->vk,
1836                                         "i915-perf config failed: %s",
1837                                         strerror(errno));
1838          }
1839       }
1840
1841       struct anv_bo *pass_batch_bo = perf_query_pool->bo;
1842
1843       struct drm_i915_gem_exec_object2 query_pass_object = {
1844          .handle = pass_batch_bo->gem_handle,
1845          .offset = pass_batch_bo->offset,
1846          .flags  = pass_batch_bo->flags,
1847       };
1848       struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
1849          .buffers_ptr = (uintptr_t) &query_pass_object,
1850          .buffer_count = 1,
1851          .batch_start_offset = khr_perf_query_preamble_offset(perf_query_pool,
1852                                                               perf_query_pass),
1853          .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
1854          .rsvd1 = device->context_id,
1855       };
1856
1857       int ret = queue->device->info->no_hw ? 0 :
1858          anv_gem_execbuffer(queue->device, &query_pass_execbuf);
1859       if (ret)
1860          result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
1861    }
1862
1863    int ret = queue->device->info->no_hw ? 0 :
1864       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
1865    if (ret)
1866       result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
1867
1868    if (result == VK_SUCCESS && queue->sync) {
1869       result = vk_sync_wait(&device->vk, queue->sync, 0,
1870                             VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
1871       if (result != VK_SUCCESS)
1872          result = vk_queue_set_lost(&queue->vk, "sync wait failed");
1873    }
1874
1875  error:
1876    anv_execbuf_finish(&execbuf);
1877
1878    if (result == VK_SUCCESS && utrace_flush_data)
1879       result = anv_queue_exec_utrace_locked(queue, utrace_flush_data);
1880
1881    return result;
1882 }
1883
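/* Two command buffers can only be chained if they don't use two different
 * performance query pools.
 */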
1884 static inline bool
1885 can_chain_query_pools(struct anv_query_pool *p1, struct anv_query_pool *p2)
1886 {
1887    return (!p1 || !p2 || p1 == p2);
1888 }
1889
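/* Split the submitted command buffers into runs that can be chained together
 * (chainable batches, compatible perf query pools), execute each run, then
 * mark any BO-backed signal syncs as submitted.
 */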
1890 static VkResult
1891 anv_queue_submit_locked(struct anv_queue *queue,
1892                         struct vk_queue_submit *submit)
1893 {
1894    VkResult result;
1895
1896    if (submit->command_buffer_count == 0) {
1897       result = anv_queue_exec_locked(queue, submit->wait_count, submit->waits,
1898                                      0 /* cmd_buffer_count */,
1899                                      NULL /* cmd_buffers */,
1900                                      submit->signal_count, submit->signals,
1901                                      NULL /* perf_query_pool */,
1902                                      0 /* perf_query_pass */);
1903       if (result != VK_SUCCESS)
1904          return result;
1905    } else {
1906       /* Everything's easier if we don't have to bother with container_of() */
1907       STATIC_ASSERT(offsetof(struct anv_cmd_buffer, vk) == 0);
1908       struct vk_command_buffer **vk_cmd_buffers = submit->command_buffers;
1909       struct anv_cmd_buffer **cmd_buffers = (void *)vk_cmd_buffers;
1910       uint32_t start = 0;
1911       uint32_t end = submit->command_buffer_count;
1912       struct anv_query_pool *perf_query_pool =
1913          cmd_buffers[start]->perf_query_pool;
1914       for (uint32_t n = 0; n < end; n++) {
1915          bool can_chain = false;
1916          uint32_t next = n + 1;
1917          /* Can we chain the last buffer into the next one? */
1918          if (next < end &&
1919              anv_cmd_buffer_is_chainable(cmd_buffers[next]) &&
1920              can_chain_query_pools
1921              (cmd_buffers[next]->perf_query_pool, perf_query_pool)) {
1922             can_chain = true;
1923             perf_query_pool =
1924                perf_query_pool ? perf_query_pool :
1925                cmd_buffers[next]->perf_query_pool;
1926          }
1927          if (!can_chain) {
1928             /* The next buffer cannot be chained, or we have reached the
1929              * last buffer; submit what has been chained so far.
1930              */
1931             VkResult result =
1932                anv_queue_exec_locked(queue,
1933                                      start == 0 ? submit->wait_count : 0,
1934                                      start == 0 ? submit->waits : NULL,
1935                                      next - start, &cmd_buffers[start],
1936                                      next == end ? submit->signal_count : 0,
1937                                      next == end ? submit->signals : NULL,
1938                                      perf_query_pool,
1939                                      submit->perf_pass_index);
1940             if (result != VK_SUCCESS)
1941                return result;
1942             if (next < end) {
1943                start = next;
1944                perf_query_pool = cmd_buffers[start]->perf_query_pool;
1945             }
1946          }
1947       }
1948    }
1949    for (uint32_t i = 0; i < submit->signal_count; i++) {
1950       if (!vk_sync_is_anv_bo_sync(submit->signals[i].sync))
1951          continue;
1952
1953       struct anv_bo_sync *bo_sync =
1954          container_of(submit->signals[i].sync, struct anv_bo_sync, sync);
1955
1956       /* Once the execbuf has returned, we need to set the fence state to
1957        * SUBMITTED.  We can't do this before calling execbuf because
1958        * anv_GetFenceStatus does take the global device lock before checking
1959        * fence->state.
1960        *
1961        * We set the fence state to SUBMITTED regardless of whether or not the
1962        * execbuf succeeds because we need to ensure that vkWaitForFences() and
1963        * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1964        * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1965        */
1966       assert(bo_sync->state == ANV_BO_SYNC_STATE_RESET);
1967       bo_sync->state = ANV_BO_SYNC_STATE_SUBMITTED;
1968    }
1969
1970    pthread_cond_broadcast(&queue->device->queue_submit);
1971
1972    return VK_SUCCESS;
1973 }
1974
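/* Entry point for vk_queue submissions.  On no_hw devices the signal syncs
 * are completed directly; otherwise the submission runs under the device
 * mutex.
 */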
1975 VkResult
1976 anv_queue_submit(struct vk_queue *vk_queue,
1977                  struct vk_queue_submit *submit)
1978 {
1979    struct anv_queue *queue = container_of(vk_queue, struct anv_queue, vk);
1980    struct anv_device *device = queue->device;
1981    VkResult result;
1982
1983    if (queue->device->info->no_hw) {
1984       for (uint32_t i = 0; i < submit->signal_count; i++) {
1985          result = vk_sync_signal(&device->vk,
1986                                  submit->signals[i].sync,
1987                                  submit->signals[i].signal_value);
1988          if (result != VK_SUCCESS)
1989             return vk_queue_set_lost(&queue->vk, "vk_sync_signal failed");
1990       }
1991       return VK_SUCCESS;
1992    }
1993
1994    uint64_t start_ts = intel_ds_begin_submit(queue->ds);
1995
1996    pthread_mutex_lock(&device->mutex);
1997    result = anv_queue_submit_locked(queue, submit);
1998    /* Take submission ID under lock */
1999    pthread_mutex_unlock(&device->mutex);
2000
2001    intel_ds_end_submit(queue->ds, start_ts);
2002
2003    return result;
2004 }
2005
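/* Execute a single, self-contained batch BO through execbuffer2 and wait for
 * it to complete.
 */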
2006 static VkResult
2007 anv_i915_execute_simple_batch(struct anv_queue *queue,
2008                               struct anv_bo *batch_bo,
2009                               uint32_t batch_bo_size)
2010 {
2011    struct anv_device *device = queue->device;
2012    struct anv_execbuf execbuf = {
2013       .alloc = &queue->device->vk.alloc,
2014       .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2015    };
2016
2017    VkResult result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);
2018    if (result != VK_SUCCESS)
2019       return result;
2020
2021    execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
2022       .buffers_ptr = (uintptr_t) execbuf.objects,
2023       .buffer_count = execbuf.bo_count,
2024       .batch_start_offset = 0,
2025       .batch_len = batch_bo_size,
2026       .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
2027       .rsvd1 = device->context_id,
2028       .rsvd2 = 0,
2029    };
2030
2031    if (anv_gem_execbuffer(device, &execbuf.execbuf)) {
2032       result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
2033       goto fail;
2034    }
2035
2036    result = anv_device_wait(device, batch_bo, INT64_MAX);
2037    if (result != VK_SUCCESS)
2038       result = vk_device_set_lost(&device->vk,
2039                                   "anv_device_wait failed: %m");
2040
2041 fail:
2042    anv_execbuf_finish(&execbuf);
2043    return result;
2044 }
2045
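/* Copy a driver-internal batch into a BO from the batch BO pool and execute
 * it synchronously.  Only used during device initialization.
 */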
2046 VkResult
2047 anv_queue_submit_simple_batch(struct anv_queue *queue,
2048                               struct anv_batch *batch)
2049 {
2050    struct anv_device *device = queue->device;
2051    VkResult result = VK_SUCCESS;
2052
2053    if (queue->device->info->no_hw)
2054       return VK_SUCCESS;
2055
2056    /* This is only used by device init so we can assume the queue is empty and
2057     * we aren't fighting with a submit thread.
2058     */
2059    assert(vk_queue_is_empty(&queue->vk));
2060
2061    uint32_t batch_size = align_u32(batch->next - batch->start, 8);
2062
2063    struct anv_bo *batch_bo = NULL;
2064    result = anv_bo_pool_alloc(&device->batch_bo_pool, batch_size, &batch_bo);
2065    if (result != VK_SUCCESS)
2066       return result;
2067
2068    memcpy(batch_bo->map, batch->start, batch_size);
2069    if (device->physical->memory.need_clflush)
2070       intel_flush_range(batch_bo->map, batch_size);
2071
2072    if (INTEL_DEBUG(DEBUG_BATCH)) {
2073       intel_print_batch(&device->decoder_ctx,
2074                         batch_bo->map,
2075                         batch_bo->size,
2076                         batch_bo->offset, false);
2077    }
2078
2079    result = anv_i915_execute_simple_batch(queue, batch_bo, batch_size);
2080
2081    anv_bo_pool_free(&device->batch_bo_pool, batch_bo);
2082
2083    return result;
2084 }