/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
#include "anv_measure.h"

#include "genxml/gen8_pack.h"
#include "genxml/genX_bits.h"
#include "perf/intel_perf.h"

#include "util/u_debug.h"
#include "util/perf/u_trace.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together.
 *
 * It specifically does *not* contain any handling of actual vkCmd calls
 * beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   memset(list, 0, sizeof(*list));
   return VK_SUCCESS;
}

static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   list->num_relocs = other_list->num_relocs;
   list->array_length = other_list->array_length;

   if (list->num_relocs > 0) {
      list->reloc_bos =
         vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->reloc_bos == NULL)
         return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);

      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   } else {
      list->reloc_bos = NULL;
   }

   list->dep_words = other_list->dep_words;

   if (list->dep_words > 0) {
      list->deps =
         vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->deps == NULL) {
         vk_free(alloc, list->reloc_bos);
         return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(list->deps, other_list->deps,
             list->dep_words * sizeof(BITSET_WORD));
   } else {
      list->deps = NULL;
   }

   return VK_SUCCESS;
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->reloc_bos);
   vk_free(alloc, list->deps);
}

static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = MAX2(16, list->array_length * 2);
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct anv_bo **new_reloc_bos =
      vk_realloc(alloc, list->reloc_bos,
                 new_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
   list->reloc_bos = new_reloc_bos;

   list->array_length = new_length;

   return VK_SUCCESS;
}

static VkResult
anv_reloc_list_grow_deps(struct anv_reloc_list *list,
                         const VkAllocationCallbacks *alloc,
                         uint32_t min_num_words)
{
   if (min_num_words <= list->dep_words)
      return VK_SUCCESS;

   uint32_t new_length = MAX2(32, list->dep_words * 2);
   while (new_length < min_num_words)
      new_length *= 2;

   BITSET_WORD *new_deps =
      vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_deps == NULL)
      return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
   list->deps = new_deps;

   /* Zero out the new data */
   memset(list->deps + list->dep_words, 0,
          (new_length - list->dep_words) * sizeof(BITSET_WORD));
   list->dep_words = new_length;

   return VK_SUCCESS;
}

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
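
/* The dependency set below is a bitset indexed by GEM handle: the BO with
 * gem_handle N occupies bit (N % BITSET_WORDBITS) of word
 * (N / BITSET_WORDBITS).  With 64-bit words, for example, a BO with GEM
 * handle 70 lands in bit 6 of deps[1].  This is what makes merging the
 * dependencies of two lists a simple word-by-word OR in
 * anv_reloc_list_append() below.
 */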

VkResult
anv_reloc_list_add_bo(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_bo *target_bo)
{
   uint32_t idx = target_bo->gem_handle;
   VkResult result = anv_reloc_list_grow_deps(list, alloc,
                                              (idx / BITSET_WORDBITS) + 1);
   if (unlikely(result != VK_SUCCESS))
      return result;

   BITSET_SET(list->deps, idx);

   return VK_SUCCESS;
}

void
anv_reloc_list_clear(struct anv_reloc_list *list)
{
   list->num_relocs = 0;
   if (list->dep_words > 0)
      memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
}

static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other)
{
   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
   if (result != VK_SUCCESS)
      return result;

   if (other->num_relocs > 0) {
      memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
             other->num_relocs * sizeof(other->reloc_bos[0]));
      list->num_relocs += other->num_relocs;
   }

   anv_reloc_list_grow_deps(list, alloc, other->dep_words);
   for (uint32_t w = 0; w < other->dep_words; w++)
      list->deps[w] |= other->deps[w];

   return VK_SUCCESS;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return NULL;
      }
   }

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
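
/* A typical caller reserves the dwords and then writes through the returned
 * pointer (hypothetical sketch, not a real call site):
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    if (dw == NULL)
 *       return;   // error already recorded via anv_batch_set_error()
 *    dw[0] = ...;
 *    dw[1] = ...;
 */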

struct anv_address
anv_batch_address(struct anv_batch *batch, void *batch_location)
{
   assert(batch->start <= batch_location);

   /* Allow a jump at the current location of the batch. */
   assert(batch->next >= batch_location);

   return anv_address_add(batch->start_addr, batch_location - batch->start);
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return;
      }
   }

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                           other->relocs);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return;
   }

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    uint32_t size,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_zalloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              size, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->vk.pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              other_bbo->bo->size, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->vk.pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->vk.pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
                         bbo->bo->map, bbo->bo->size - batch_padding);
   batch->relocs = &bbo->relocs;
   anv_reloc_list_clear(&bbo->relocs);
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
   batch->start = bbo->bo->map;
   batch->next = bbo->bo->map + bbo->length;
   batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo->map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
                  struct anv_batch_bo *prev_bbo,
                  struct anv_batch_bo *next_bbo,
                  uint32_t next_bbo_offset)
{
   const uint32_t bb_start_offset =
      prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
   ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;

   /* Make sure we're looking at a MI_BATCH_BUFFER_START */
   assert(((*bb_start >> 29) & 0x07) == 0);
   assert(((*bb_start >> 23) & 0x3f) == 49);

   uint64_t *map = prev_bbo->bo->map + bb_start_offset + 4;
   *map = intel_canonical_address(next_bbo->bo->offset + next_bbo_offset);

   if (cmd_buffer->device->physical->memory.need_clflush)
      intel_flush_range(map, sizeof(uint64_t));
}
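
/* The asserts in anv_batch_bo_link() decode the instruction header: bits
 * 31:29 hold the command type (0 == MI) and bits 28:23 hold the MI opcode,
 * where 49 (0x31) is MI_BATCH_BUFFER_START.  Since 49 << 23 == 0x18800000,
 * a valid header dword here always lies in 0x18800000..0x18ffffff.
 */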

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
   vk_free(&cmd_buffer->vk.pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo)
         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
         list_del(&bbo->link);
         anv_batch_bo_destroy(bbo, cmd_buffer);
      }
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return list_entry(cmd_buffer->batch_bos.prev, struct anv_batch_bo, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state_pool *pool = &cmd_buffer->device->binding_table_pool;
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   return (struct anv_address) {
      .bo = pool->block_pool.bo,
      .offset = bt_block->offset - pool->start_offset,
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gfx8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gens.
    */

#define GFX7_MI_BATCH_BUFFER_START_length      2
#define GFX7_MI_BATCH_BUFFER_START_length_bias 2

   const uint32_t gfx7_length =
      GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gfx8_length =
      GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength             = cmd_buffer->device->info->ver < 8 ?
                                    gfx7_length : gfx8_length;
      bbs.SecondLevelBatchBuffer  = Firstlevelbatch;
      bbs.AddressSpaceIndicator   = ASI_PPGTT;
      bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
   }
}
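
/* Worked example of the length fix-up above: the gfx8 encoding is
 * GFX8_MI_BATCH_BUFFER_START_length == 3 dwords with a bias of 2, so
 * gfx8_length == 1, while the gfx7 encoding is 2 dwords, giving
 * gfx7_length == 0.  We always emit the full 3-dword gfx8 layout; on gfx7
 * the length field simply tells the hardware to stop reading before the
 * high address dword.
 */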

static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);

   emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static void
anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
                                   struct anv_cmd_buffer *cmd_buffer_to)
{
   uint32_t *bb_start = cmd_buffer_from->batch_end;

   struct anv_batch_bo *last_bbo =
      list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
   struct anv_batch_bo *first_bbo =
      list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);

   struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
      __anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
      .SecondLevelBatchBuffer = Firstlevelbatch,
      .AddressSpaceIndicator = ASI_PPGTT,
      .BatchBufferStartAddress = (struct anv_address) { first_bbo->bo, 0 },
   };
   struct anv_batch local_batch = {
      .start  = last_bbo->bo->map,
      .end    = last_bbo->bo->map + last_bbo->bo->size,
      .relocs = &last_bbo->relocs,
      .alloc  = &cmd_buffer_from->vk.pool->alloc,
   };

   __anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);

   last_bbo->chained = true;
}

static void
anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *last_bbo =
      list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
   last_bbo->chained = false;

   uint32_t *batch = cmd_buffer->batch_end;
   anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
                   __anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo = NULL;
   /* Cap reallocation to chunk. */
   uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
                              ANV_MAX_CMD_BUFFER_BATCH_SIZE);

   VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   cmd_buffer->total_batch_size += alloc_size;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}
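
/* Illustrative growth pattern (with hypothetical values of an 8k minimum
 * and a 256k chunk cap): the first chained BO adds another 8k, the next
 * 16k, then 32k, and so on, so total_batch_size roughly doubles on every
 * chain until individual allocations hit the cap and growth becomes
 * linear.
 */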

/** Allocate a binding table
 *
 * This function allocates a binding table.  This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address.  The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 *  1) In Vulkan, we have growing (or chaining) batches so surface states have
 *     to live in their own buffer and we have to be able to re-emit
 *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
 *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *     (it's not that hard to hit 64k of just binding tables), we allocate
 *     surface state objects up-front when VkImageView is created.  In order
 *     for this to work, surface state objects need to be allocated from a
 *     global buffer.
 *
 *  2) We tried to design the surface state system in such a way that it's
 *     already ready for bindless texturing.  The way bindless texturing works
 *     on our hardware is that you have a big pool of surface state objects
 *     (with its own state base address) and the bindless handles are simply
 *     offsets into that pool.  With the architecture we chose, we already
 *     have that pool and it's exactly the same pool that we use for regular
 *     surface states so we should already be ready for bindless.
 *
 *  3) For render targets, we need to be able to fill out the surface states
 *     later in vkBeginRenderPass so that we can assign clear colors
 *     correctly.  One way to do this would be to just create the surface
 *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; being able to allocate them up-front and re-use them
 *     for the entire render pass is far simpler.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly.  Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k, we have to be more clever.  The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions.  All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets).  Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block.  This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address.  When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
 *
 * \param[in]  entries        The number of surface state entries the binding
 *                            table should be able to hold.
 *
 * \param[out] state_offset   The offset from surface state base address
 *                            where the surface states live.  This must be
 *                            added to the surface state offset when it is
 *                            written into the binding table entry.
 *
 * \return An anv_state representing the binding table
 */
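
/* Worked example of the offsetting described above (illustrative numbers):
 * if the current binding table block was allocated at block-pool offset
 * -4096, surface state base address points at that block and *state_offset
 * is 4096.  A surface state living at pool offset 8192 is then written into
 * its binding table entry as 8192 + 4096 == 12288, its correct offset
 * relative to the new surface state base address.
 */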

struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);

   uint32_t bt_size = align_u32(entries * 4, 32);

   struct anv_state state = cmd_buffer->bt_next;
   if (bt_size > state.alloc_size)
      return (struct anv_state) { 0 };

   state.alloc_size = bt_size;
   cmd_buffer->bt_next.offset += bt_size;
   cmd_buffer->bt_next.map += bt_size;
   cmd_buffer->bt_next.alloc_size -= bt_size;

   if (cmd_buffer->device->info->verx10 >= 125) {
      /* We're using 3DSTATE_BINDING_TABLE_POOL_ALLOC to change the binding
       * table address independently from surface state base address.  We no
       * longer need any sort of offsetting.
       */
      *state_offset = 0;
   } else {
      assert(bt_block->offset < 0);
      *state_offset = -bt_block->offset;
   }

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

/** Allocate space associated with a command buffer
 *
 * Some commands like vkCmdBuildAccelerationStructuresKHR() can end up needing
 * a large amount of temporary buffer space.  This function is here to deal
 * with those potentially larger allocations, using a side BO if needed.
 */
struct anv_cmd_alloc
anv_cmd_buffer_alloc_space(struct anv_cmd_buffer *cmd_buffer,
                           size_t size, uint32_t alignment)
{
   /* Below 16k, source memory from dynamic state, otherwise allocate a BO. */
   if (size < 16 * 1024) {
      struct anv_state state =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                size, alignment);

      return (struct anv_cmd_alloc) {
         .address = (struct anv_address) {
            .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
            .offset = state.offset,
         },
         .map = state.map,
         .size = size,
      };
   }

   assert(alignment <= 4096);

   struct anv_bo *bo = NULL;
   VkResult result =
      anv_device_alloc_bo(cmd_buffer->device,
                          "cmd-buffer-space",
                          align_u32(size, 4096),
                          ANV_BO_ALLOC_MAPPED,
                          0 /* explicit_address */,
                          &bo);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_DEVICE_MEMORY);
      return ANV_EMPTY_ALLOC;
   }

   struct anv_bo **bo_entry =
      u_vector_add(&cmd_buffer->dynamic_bos);
   if (bo_entry == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return ANV_EMPTY_ALLOC;
   }
   *bo_entry = bo;

   return (struct anv_cmd_alloc) {
      .address = (struct anv_address) { .bo = bo },
      .map = bo->map,
      .size = size,
   };
}
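
/* Hypothetical usage sketch (assuming ANV_EMPTY_ALLOC yields a NULL map):
 *
 *    struct anv_cmd_alloc alloc =
 *       anv_cmd_buffer_alloc_space(cmd_buffer, size, 64);
 *    if (alloc.map == NULL)
 *       return;   // error already recorded on the batch
 *
 * Small requests share the dynamic state stream; larger ones get a private
 * BO which is tracked in cmd_buffer->dynamic_bos and added to the execbuf
 * at submit time.
 */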

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
   if (bt_block == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);

   /* The bt_next state is a rolling state (we update it as we suballocate
    * from it) which is relative to the start of the binding table block.
    */
   cmd_buffer->bt_next = *bt_block;
   cmd_buffer->bt_next.offset = 0;

   return VK_SUCCESS;
}

VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = NULL;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;

   result = anv_batch_bo_create(cmd_buffer,
                                cmd_buffer->total_batch_size,
                                &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->vk.pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GFX8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init_pow2(&cmd_buffer->seen_bbos, 8,
                                    sizeof(struct anv_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   success = u_vector_init(&cmd_buffer->bt_block_states, 8,
                           sizeof(struct anv_state));
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->vk.pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_block_states);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   u_vector_finish(&cmd_buffer->bt_block_states);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->vk.pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_is_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_is_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GFX8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
   cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
   cmd_buffer->bt_next.offset = 0;

   anv_reloc_list_clear(&cmd_buffer->surface_relocs);

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;

   assert(first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
   cmd_buffer->total_batch_size = first_bbo->bo->size;
}

void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.start == batch_bo->bo->map);
      assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

      /* Save end instruction location to override it later. */
      cmd_buffer->batch_end = cmd_buffer->batch.next;

      /* If we can chain this command buffer to another one, leave some place
       * for the jump instruction.
       */
      batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
      if (batch_bo->chained)
         emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
      else
         anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   } else {
      assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
      if (cmd_buffer->device->physical->use_call_secondary) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
         /* If the secondary command buffer begins & ends in the same BO and
          * its length is less than the length of CS prefetch, add some NOOPs
          * instructions so the last MI_BATCH_BUFFER_START is outside the CS
          * prefetch.
          */
         if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
            const struct intel_device_info *devinfo = cmd_buffer->device->info;
            const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
            /* Careful to have everything in signed integer. */
            int32_t prefetch_len = devinfo->engine_class_prefetch[engine_class];
            int32_t batch_len = cmd_buffer->batch.next - cmd_buffer->batch.start;

            for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
               anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
         }
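
         /* Example: with a hypothetical 512-byte CS prefetch and a
          * 64-byte batch, the loop above emits (512 - 64) / 4 == 112
          * MI_NOOPs so the MI_BATCH_BUFFER_START emitted below lands
          * outside the prefetch window.
          */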

         void *jump_addr =
            anv_batch_emitn(&cmd_buffer->batch,
                            GFX8_MI_BATCH_BUFFER_START_length,
                            GFX8_MI_BATCH_BUFFER_START,
                            .AddressSpaceIndicator = ASI_PPGTT,
                            .SecondLevelBatchBuffer = Firstlevelbatch) +
            (GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
         cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);

         /* The emit above may have caused us to chain batch buffers which
          * would mean that batch_bo is no longer valid.
          */
         batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* In order to chain, we need this command buffer to contain an
          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation.  We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so we would be sure we
          * have room for the chaining command.  Since we're about to emit the
          * chaining command, let's set it back where it should go.
          */
         cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
         assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

         emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}

static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   anv_measure_add_secondary(primary, secondary);
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo->map);
      uint32_t offset = primary->batch.next - primary->batch.start;

      /* Make the tail of the secondary point back to right after the
       * MI_BATCH_BUFFER_START in the primary batch.
       */
      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                primary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return;

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GFX8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      uint64_t *write_return_addr =
         anv_batch_emitn(&primary->batch,
                         GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
                         GFX8_MI_STORE_DATA_IMM,
                         .Address = secondary->return_addr)
         + (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);

      emit_batch_buffer_start(primary, first_bbo->bo, 0);

      *write_return_addr =
         anv_address_physical(anv_batch_address(&primary->batch,
                                                primary->batch.next));

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->vk.pool->alloc,
                         &secondary->surface_relocs);
}

struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;

   uint32_t                                  syncobj_count;
   uint32_t                                  syncobj_array_length;
   struct drm_i915_gem_exec_fence *          syncobjs;
   uint64_t *                                syncobj_values;

   /* List of relocations for surface states, only used with platforms not
    * using softpin.
    */
   void *                                    surface_states_relocs;

   uint32_t                                  cmd_buffer_count;
   struct anv_query_pool *                   perf_query_pool;

   const VkAllocationCallbacks *             alloc;
   VkSystemAllocationScope                   alloc_scope;

   int                                       perf_query_pass;
};

static void
anv_execbuf_finish(struct anv_execbuf *exec)
{
   vk_free(exec->alloc, exec->syncobjs);
   vk_free(exec->alloc, exec->syncobj_values);
   vk_free(exec->alloc, exec->surface_states_relocs);
   vk_free(exec->alloc, exec->objects);
   vk_free(exec->alloc, exec->bos);
}

static void
anv_execbuf_add_ext(struct anv_execbuf *exec,
                    uint32_t ext_name,
                    struct i915_user_extension *ext)
{
   __u64 *iter = &exec->execbuf.cliprects_ptr;

   exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;

   while (*iter != 0) {
      iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
   }

   ext->name = ext_name;

   *iter = (uintptr_t) ext;
}
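
/* i915 chains user extensions through a singly-linked list rooted in the
 * otherwise-unused cliprects_ptr field once I915_EXEC_USE_EXTENSIONS is
 * set.  The walk above finds the current tail:
 *
 *    cliprects_ptr -> ext A (.next_extension) -> ext B -> 0
 *
 * so repeated calls append rather than overwrite.
 */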

static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags);

static VkResult
anv_execbuf_add_bo(struct anv_device *device,
                   struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   uint32_t extra_flags)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->exec_obj_index < exec->bo_count &&
       exec->bos[bo->exec_obj_index] == bo)
      obj = &exec->objects[bo->exec_obj_index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
         if (new_objects == NULL)
            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
         if (new_bos == NULL) {
            vk_free(exec->alloc, new_objects);
            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(exec->alloc, exec->objects);
         vk_free(exec->alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->exec_obj_index = exec->bo_count++;
      obj = &exec->objects[bo->exec_obj_index];
      exec->bos[bo->exec_obj_index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->flags | extra_flags;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (extra_flags & EXEC_OBJECT_WRITE) {
      obj->flags |= EXEC_OBJECT_WRITE;
      obj->flags &= ~EXEC_OBJECT_ASYNC;
   }

   if (relocs != NULL) {
      for (size_t i = 0; i < relocs->num_relocs; i++) {
         VkResult result =
            anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
                               NULL, extra_flags);
         if (result != VK_SUCCESS)
            return result;
      }

      return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
                                       relocs->deps, extra_flags);
   }

   return VK_SUCCESS;
}

/* Add BO dependencies to execbuf */
static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags)
{
   for (uint32_t w = 0; w < dep_words; w++) {
      BITSET_WORD mask = deps[w];
      while (mask) {
         int i = u_bit_scan(&mask);
         uint32_t gem_handle = w * BITSET_WORDBITS + i;
         struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
         assert(bo->refcount > 0);
         VkResult result =
            anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult
anv_execbuf_add_syncobj(struct anv_device *device,
                        struct anv_execbuf *exec,
                        uint32_t syncobj,
                        uint32_t flags,
                        uint64_t timeline_value)
{
   if (exec->syncobj_count >= exec->syncobj_array_length) {
      uint32_t new_len = MAX2(exec->syncobj_array_length * 2, 16);

      struct drm_i915_gem_exec_fence *new_syncobjs =
         vk_alloc(exec->alloc, new_len * sizeof(*new_syncobjs),
                  8, exec->alloc_scope);
      if (new_syncobjs == NULL)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      if (exec->syncobjs)
         typed_memcpy(new_syncobjs, exec->syncobjs, exec->syncobj_count);

      exec->syncobjs = new_syncobjs;

      if (exec->syncobj_values) {
         uint64_t *new_syncobj_values =
            vk_alloc(exec->alloc, new_len * sizeof(*new_syncobj_values),
                     8, exec->alloc_scope);
         if (!new_syncobj_values)
            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

         typed_memcpy(new_syncobj_values, exec->syncobj_values,
                      exec->syncobj_count);

         exec->syncobj_values = new_syncobj_values;
      }

      exec->syncobj_array_length = new_len;
   }

   if (timeline_value && !exec->syncobj_values) {
      exec->syncobj_values =
         vk_zalloc(exec->alloc, exec->syncobj_array_length *
                                sizeof(*exec->syncobj_values),
                   8, exec->alloc_scope);
      if (!exec->syncobj_values)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   exec->syncobjs[exec->syncobj_count] = (struct drm_i915_gem_exec_fence) {
      .handle = syncobj,
      .flags = flags,
   };
   if (exec->syncobj_values)
      exec->syncobj_values[exec->syncobj_count] = timeline_value;

   exec->syncobj_count++;

   return VK_SUCCESS;
}
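
/* The syncobj_values array is allocated lazily: submissions using only
 * binary syncobjs never pay for it, and the first timeline point
 * zero-fills the values of any syncobjs added earlier (a value of 0
 * effectively requests binary semantics from the timeline-fences
 * extension).
 */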

static VkResult
anv_execbuf_add_sync(struct anv_device *device,
                     struct anv_execbuf *execbuf,
                     struct vk_sync *sync,
                     bool is_signal,
                     uint64_t value)
{
   /* It's illegal to signal a timeline with value 0 because that's never
    * higher than the current value.  A timeline wait on value 0 is always
    * trivial because 0 <= uint64_t always.
    */
   if ((sync->flags & VK_SYNC_IS_TIMELINE) && value == 0)
      return VK_SUCCESS;

   if (vk_sync_is_anv_bo_sync(sync)) {
      struct anv_bo_sync *bo_sync =
         container_of(sync, struct anv_bo_sync, sync);

      assert(is_signal == (bo_sync->state == ANV_BO_SYNC_STATE_RESET));

      return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
                                is_signal ? EXEC_OBJECT_WRITE : 0);
   } else if (vk_sync_type_is_drm_syncobj(sync->type)) {
      struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(sync);

      if (!(sync->flags & VK_SYNC_IS_TIMELINE))
         value = 0;

      return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
                                     is_signal ? I915_EXEC_FENCE_SIGNAL :
                                                 I915_EXEC_FENCE_WAIT,
                                     value);
   }

   unreachable("Invalid sync type");
}

static VkResult
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                             struct anv_cmd_buffer *cmd_buffer)
{
   VkResult result;
   /* Add surface dependencies (BOs) to the execbuf */
   result = anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
                                      cmd_buffer->surface_relocs.dep_words,
                                      cmd_buffer->surface_relocs.deps, 0);
   if (result != VK_SUCCESS)
      return result;

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  (*bbo)->bo, &(*bbo)->relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   struct anv_bo **bo_entry;
   u_vector_foreach(bo_entry, &cmd_buffer->dynamic_bos) {
      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  *bo_entry, NULL, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

static void
chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
                      uint32_t num_cmd_buffers)
{
   if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
      assert(num_cmd_buffers == 1);
      return;
   }

   /* Chain the N-1 first batch buffers */
   for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
      anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);

   /* Put an end to the last one */
   anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
}

static VkResult
pin_state_pool(struct anv_device *device,
               struct anv_execbuf *execbuf,
               struct anv_state_pool *pool)
{
   anv_block_pool_foreach_bo(bo, &pool->block_pool) {
      VkResult result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   return VK_SUCCESS;
}

static VkResult
setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
                              struct anv_queue *queue,
                              struct anv_cmd_buffer **cmd_buffers,
                              uint32_t num_cmd_buffers)
{
   struct anv_device *device = queue->device;
   VkResult result;

   /* Edit the tail of the command buffers to chain them all together if they
    * can be.
    */
   chain_command_buffers(cmd_buffers, num_cmd_buffers);

   for (uint32_t i = 0; i < num_cmd_buffers; i++) {
      anv_measure_submit(cmd_buffers[i]);
      result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Add all the global BOs to the object list for softpin case. */
   result = pin_state_pool(device, execbuf, &device->scratch_surface_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->bindless_surface_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->internal_surface_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->dynamic_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->general_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->instruction_state_pool);
   if (result != VK_SUCCESS)
      return result;

   result = pin_state_pool(device, execbuf, &device->binding_table_pool);
   if (result != VK_SUCCESS)
      return result;

   /* Add the BOs for all user allocated memory objects because we can't
    * track after binding updates of VK_EXT_descriptor_indexing.
    */
   list_for_each_entry(struct anv_device_memory, mem,
                       &device->memory_objects, link) {
      result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   for (uint32_t i = 0; i < execbuf->bo_count; i++)
      execbuf->objects[i].offset = execbuf->bos[i]->offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo->exec_obj_index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo->exec_obj_index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->exec_obj_index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = first_batch_bo->bo;
      first_batch_bo->bo->exec_obj_index = last_idx;
   }

   if (device->physical->memory.need_clflush) {
      __builtin_ia32_mfence();
      struct anv_batch_bo **bbo;
      for (uint32_t i = 0; i < num_cmd_buffers; i++) {
         u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
            for (uint32_t l = 0; l < (*bbo)->length; l += CACHELINE_SIZE)
               __builtin_ia32_clflush((*bbo)->bo->map + l);
         }
      }
   }

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      /* We'll fill in batch length later when chaining batches. */
      .batch_len = 0,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_NO_RELOC |
               I915_EXEC_HANDLE_LUT |
               queue->exec_flags,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
{
   struct anv_device *device = queue->device;
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        device->trivial_batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GFX8_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

static VkResult
setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
                     struct anv_utrace_flush_copy *flush)
{
   struct anv_device *device = queue->device;
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        flush->batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   result = anv_execbuf_add_sync(device, execbuf, flush->sync,
                                 true /* is_signal */, 0 /* value */);
   if (result != VK_SUCCESS)
      return result;

   if (flush->batch_bo->exec_obj_index != execbuf->bo_count - 1) {
      uint32_t idx = flush->batch_bo->exec_obj_index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == flush->batch_bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->exec_obj_index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = flush->batch_bo;
      flush->batch_bo->exec_obj_index = last_idx;
   }

   if (device->physical->memory.need_clflush)
      intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = flush->batch.next - flush->batch.start,
      .flags = I915_EXEC_NO_RELOC |
               I915_EXEC_HANDLE_LUT |
               I915_EXEC_FENCE_ARRAY |
               queue->exec_flags,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
      .num_cliprects = execbuf->syncobj_count,
      .cliprects_ptr = (uintptr_t)execbuf->syncobjs,
   };

   return VK_SUCCESS;
}

static VkResult
anv_queue_exec_utrace_locked(struct anv_queue *queue,
                             struct anv_utrace_flush_copy *flush)
{
   assert(flush->batch_bo);

   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf = {
      .alloc = &device->vk.alloc,
      .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
   };

   VkResult result = setup_utrace_execbuf(&execbuf, queue, flush);
   if (result != VK_SUCCESS)
      goto error;

   int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret)
      result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");

 error:
   anv_execbuf_finish(&execbuf);

   return result;
}

static void
anv_exec_batch_debug(struct anv_queue *queue, uint32_t cmd_buffer_count,
                     struct anv_cmd_buffer **cmd_buffers,
                     struct anv_query_pool *perf_query_pool,
                     uint32_t perf_query_pass)
{
   if (!INTEL_DEBUG(DEBUG_BATCH))
      return;

   struct anv_device *device = queue->device;
   const bool has_perf_query = perf_query_pool && perf_query_pass >= 0 &&
                               cmd_buffer_count;

   fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
   if (cmd_buffer_count) {
      if (has_perf_query) {
         struct anv_bo *pass_batch_bo = perf_query_pool->bo;
         uint64_t pass_batch_offset =
            khr_perf_query_preamble_offset(perf_query_pool, perf_query_pass);

         intel_print_batch(&device->decoder_ctx,
                           pass_batch_bo->map + pass_batch_offset, 64,
                           pass_batch_bo->offset + pass_batch_offset, false);
      }

      for (uint32_t i = 0; i < cmd_buffer_count; i++) {
         struct anv_batch_bo **bo = u_vector_tail(&cmd_buffers[i]->seen_bbos);
         device->cmd_buffer_being_decoded = cmd_buffers[i];
         intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                           (*bo)->bo->size, (*bo)->bo->offset, false);
         device->cmd_buffer_being_decoded = NULL;
      }
   } else {
      intel_print_batch(&device->decoder_ctx, device->trivial_batch_bo->map,
                        device->trivial_batch_bo->size,
                        device->trivial_batch_bo->offset, false);
   }
}

/* We lock around execbuf for three main reasons:
 *
 * 1) When a block pool is resized, we create a new gem handle with a
 *    different size and, in the case of surface states, possibly a different
 *    center offset but we re-use the same anv_bo struct when we do so.  If
 *    this happens in the middle of setting up an execbuf, we could end up
 *    with our list of BOs out of sync with our list of gem handles.
 *
 * 2) The algorithm we use for building the list of unique buffers isn't
 *    thread-safe.  While the client is supposed to synchronize around
 *    QueueSubmit, this would be extremely difficult to debug if it ever came
 *    up in the wild due to a broken app.  It's better to play it safe and
 *    just lock around QueueSubmit.
 *
 * Since the only other things that ever take the device lock, such as block
 * pool resize, only rarely happen, this will almost never be contended, so
 * taking a lock isn't really an expensive operation in this case.
 */
static VkResult
anv_queue_exec_locked(struct anv_queue *queue,
                      uint32_t wait_count,
                      const struct vk_sync_wait *waits,
                      uint32_t cmd_buffer_count,
                      struct anv_cmd_buffer **cmd_buffers,
                      uint32_t signal_count,
                      const struct vk_sync_signal *signals,
                      struct anv_query_pool *perf_query_pool,
                      uint32_t perf_query_pass)
{
   struct anv_device *device = queue->device;
   struct anv_utrace_flush_copy *utrace_flush_data = NULL;
   struct anv_execbuf execbuf = {
      .alloc = &queue->device->vk.alloc,
      .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
      .perf_query_pass = perf_query_pass,
   };

   /* Flush the trace points first, they need to be moved */
   VkResult result =
      anv_device_utrace_flush_cmd_buffers(queue,
                                          cmd_buffer_count,
                                          cmd_buffers,
                                          &utrace_flush_data);
   if (result != VK_SUCCESS)
      goto error;

   if (utrace_flush_data && !utrace_flush_data->batch_bo) {
      result = anv_execbuf_add_sync(device, &execbuf,
                                    utrace_flush_data->sync,
                                    true /* is_signal */,
                                    0 /* value */);
      if (result != VK_SUCCESS)
         goto error;

      utrace_flush_data = NULL;
   }

   /* Always add the workaround BO as it includes a driver identifier for the
    * error_state.
    */
   result =
      anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
   if (result != VK_SUCCESS)
      goto error;

   for (uint32_t i = 0; i < wait_count; i++) {
      result = anv_execbuf_add_sync(device, &execbuf,
                                    waits[i].sync,
                                    false /* is_signal */,
                                    waits[i].wait_value);
      if (result != VK_SUCCESS)
         goto error;
   }

   for (uint32_t i = 0; i < signal_count; i++) {
      result = anv_execbuf_add_sync(device, &execbuf,
                                    signals[i].sync,
                                    true /* is_signal */,
                                    signals[i].signal_value);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (queue->sync) {
      result = anv_execbuf_add_sync(device, &execbuf,
                                    queue->sync,
                                    true /* is_signal */,
                                    0 /* signal_value */);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (cmd_buffer_count) {
      result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
                                             cmd_buffers,
                                             cmd_buffer_count);
   } else {
      result = setup_empty_execbuf(&execbuf, queue);
   }

   if (result != VK_SUCCESS)
      goto error;

   const bool has_perf_query =
      perf_query_pool && perf_query_pass >= 0 && cmd_buffer_count;

   if (INTEL_DEBUG(DEBUG_SUBMIT)) {
      fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
              execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
      for (uint32_t i = 0; i < execbuf.bo_count; i++) {
         const struct anv_bo *bo = execbuf.bos[i];

         fprintf(stderr, "   BO: addr=0x%016"PRIx64"-0x%016"PRIx64" size=0x%010"PRIx64
                 " handle=%05u capture=%u name=%s\n",
                 bo->offset, bo->offset + bo->size - 1, bo->size, bo->gem_handle,
                 (bo->flags & EXEC_OBJECT_CAPTURE) != 0, bo->name);
      }
   }

   anv_exec_batch_debug(queue, cmd_buffer_count, cmd_buffers, perf_query_pool,
                        perf_query_pass);

   if (execbuf.syncobj_values) {
      execbuf.timeline_fences.fence_count = execbuf.syncobj_count;
      execbuf.timeline_fences.handles_ptr = (uintptr_t)execbuf.syncobjs;
      execbuf.timeline_fences.values_ptr = (uintptr_t)execbuf.syncobj_values;
      anv_execbuf_add_ext(&execbuf,
                          DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
                          &execbuf.timeline_fences.base);
   } else if (execbuf.syncobjs) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = execbuf.syncobj_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t)execbuf.syncobjs;
   }

   if (has_perf_query) {
      assert(perf_query_pass < perf_query_pool->n_passes);
      struct intel_perf_query_info *query_info =
         perf_query_pool->pass_query[perf_query_pass];

      /* Some performance queries use just the pipeline statistics HW, no
       * need for OA in that case, so no need to reconfigure.
       */
      if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
          (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
           query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
         int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
                               (void *)(uintptr_t) query_info->oa_metrics_set_id);
         if (ret < 0) {
            result = vk_device_set_lost(&device->vk,
                                        "i915-perf config failed: %s",
                                        strerror(errno));
         }
      }

      struct anv_bo *pass_batch_bo = perf_query_pool->bo;

      struct drm_i915_gem_exec_object2 query_pass_object = {
         .handle = pass_batch_bo->gem_handle,
         .offset = pass_batch_bo->offset,
         .flags  = pass_batch_bo->flags,
      };
      struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
         .buffers_ptr = (uintptr_t) &query_pass_object,
         .buffer_count = 1,
         .batch_start_offset = khr_perf_query_preamble_offset(perf_query_pool,
                                                              perf_query_pass),
         .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
         .rsvd1 = device->context_id,
      };

      int ret = queue->device->info->no_hw ? 0 :
         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
      if (ret)
         result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
   }

   int ret = queue->device->info->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret)
      result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");

   if (result == VK_SUCCESS && queue->sync) {
      result = vk_sync_wait(&device->vk, queue->sync, 0,
                            VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
      if (result != VK_SUCCESS)
         result = vk_queue_set_lost(&queue->vk, "sync wait failed");
   }

 error:
   anv_execbuf_finish(&execbuf);

   if (result == VK_SUCCESS && utrace_flush_data)
      result = anv_queue_exec_utrace_locked(queue, utrace_flush_data);

   return result;
}

static bool
can_chain_query_pools(struct anv_query_pool *p1, struct anv_query_pool *p2)
{
   return (!p1 || !p2 || p1 == p2);
}

static VkResult
anv_queue_submit_locked(struct anv_queue *queue,
                        struct vk_queue_submit *submit)
{
   VkResult result;

   if (submit->command_buffer_count == 0) {
      result = anv_queue_exec_locked(queue, submit->wait_count, submit->waits,
                                     0 /* cmd_buffer_count */,
                                     NULL /* cmd_buffers */,
                                     submit->signal_count, submit->signals,
                                     NULL /* perf_query_pool */,
                                     0 /* perf_query_pass */);
      if (result != VK_SUCCESS)
         return result;
   } else {
      /* Everything's easier if we don't have to bother with container_of() */
      STATIC_ASSERT(offsetof(struct anv_cmd_buffer, vk) == 0);
      struct vk_command_buffer **vk_cmd_buffers = submit->command_buffers;
      struct anv_cmd_buffer **cmd_buffers = (void *)vk_cmd_buffers;
      uint32_t start = 0;
      uint32_t end = submit->command_buffer_count;
      struct anv_query_pool *perf_query_pool =
         cmd_buffers[start]->perf_query_pool;
      for (uint32_t n = 0; n < end; n++) {
         bool can_chain = false;
         uint32_t next = n + 1;
         /* Can we chain the last buffer into the next one? */
         if (next < end &&
             anv_cmd_buffer_is_chainable(cmd_buffers[next]) &&
             can_chain_query_pools
             (cmd_buffers[next]->perf_query_pool, perf_query_pool)) {
            can_chain = true;
            perf_query_pool =
               perf_query_pool ? perf_query_pool :
                                 cmd_buffers[next]->perf_query_pool;
         }
         if (!can_chain) {
            /* The next buffer cannot be chained, or we have reached the
             * last buffer, submit what has been chained so far.
             */
            result =
               anv_queue_exec_locked(queue,
                                     start == 0 ? submit->wait_count : 0,
                                     start == 0 ? submit->waits : NULL,
                                     next - start, &cmd_buffers[start],
                                     next == end ? submit->signal_count : 0,
                                     next == end ? submit->signals : NULL,
                                     perf_query_pool,
                                     submit->perf_pass_index);
            if (result != VK_SUCCESS)
               return result;
            if (next < end) {
               start = next;
               perf_query_pool = cmd_buffers[start]->perf_query_pool;
            }
         }
      }
   }

   for (uint32_t i = 0; i < submit->signal_count; i++) {
      if (!vk_sync_is_anv_bo_sync(submit->signals[i].sync))
         continue;

      struct anv_bo_sync *bo_sync =
         container_of(submit->signals[i].sync, struct anv_bo_sync, sync);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED.  We can't do this before calling execbuf because
       * anv_GetFenceStatus does take the global device lock before checking
       * fence status.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      assert(bo_sync->state == ANV_BO_SYNC_STATE_RESET);
      bo_sync->state = ANV_BO_SYNC_STATE_SUBMITTED;
   }

   pthread_cond_broadcast(&queue->device->queue_submit);

   return VK_SUCCESS;
}

VkResult
anv_queue_submit(struct vk_queue *vk_queue,
                 struct vk_queue_submit *submit)
{
   struct anv_queue *queue = container_of(vk_queue, struct anv_queue, vk);
   struct anv_device *device = queue->device;
   VkResult result;

   if (queue->device->info->no_hw) {
      for (uint32_t i = 0; i < submit->signal_count; i++) {
         result = vk_sync_signal(&device->vk,
                                 submit->signals[i].sync,
                                 submit->signals[i].signal_value);
         if (result != VK_SUCCESS)
            return vk_queue_set_lost(&queue->vk, "vk_sync_signal failed");
      }
      return VK_SUCCESS;
   }

   uint64_t start_ts = intel_ds_begin_submit(queue->ds);

   pthread_mutex_lock(&device->mutex);
   result = anv_queue_submit_locked(queue, submit);
   /* Take submission ID under lock */
   pthread_mutex_unlock(&device->mutex);

   intel_ds_end_submit(queue->ds, start_ts);

   return result;
}

static VkResult
anv_i915_execute_simple_batch(struct anv_queue *queue,
                              struct anv_bo *batch_bo,
                              uint32_t batch_bo_size)
{
   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf = {
      .alloc = &queue->device->vk.alloc,
      .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
   };

   VkResult result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf.objects,
      .buffer_count = execbuf.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch_bo_size,
      .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   if (anv_gem_execbuffer(device, &execbuf.execbuf)) {
      result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
      goto fail;
   }

   result = anv_device_wait(device, batch_bo, INT64_MAX);
   if (result != VK_SUCCESS)
      result = vk_device_set_lost(&device->vk,
                                  "anv_device_wait failed: %m");

 fail:
   anv_execbuf_finish(&execbuf);
   return result;
}

VkResult
anv_queue_submit_simple_batch(struct anv_queue *queue,
                              struct anv_batch *batch)
{
   struct anv_device *device = queue->device;
   VkResult result = VK_SUCCESS;

   if (queue->device->info->no_hw)
      return VK_SUCCESS;

   /* This is only used by device init so we can assume the queue is empty and
    * we aren't fighting with a submit thread.
    */
   assert(vk_queue_is_empty(&queue->vk));

   uint32_t batch_size = align_u32(batch->next - batch->start, 8);

   struct anv_bo *batch_bo = NULL;
   result = anv_bo_pool_alloc(&device->batch_bo_pool, batch_size, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(batch_bo->map, batch->start, batch_size);
   if (device->physical->memory.need_clflush)
      intel_flush_range(batch_bo->map, batch_size);

   if (INTEL_DEBUG(DEBUG_BATCH)) {
      intel_print_batch(&device->decoder_ctx,
                        batch_bo->map,
                        batch_bo->size,
                        batch_bo->offset, false);
   }

   result = anv_i915_execute_simple_batch(queue, batch_bo, batch_size);

   anv_bo_pool_free(&device->batch_bo_pool, batch_bo);

   return result;
}