bool predicated = !wait && !q->stalled;
struct mi_builder b;
- mi_builder_init(&b, batch);
+ mi_builder_init(&b, &batch->screen->devinfo, batch);
iris_batch_sync_region_start(batch);
q->stalled = true;
struct mi_builder b;
- mi_builder_init(&b, batch);
+ mi_builder_init(&b, &batch->screen->devinfo, batch);
struct mi_value result;
if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
struct mi_builder b;
- mi_builder_init(&b, batch);
+ mi_builder_init(&b, &batch->screen->devinfo, batch);
/* comparison = draw id < draw count */
struct mi_value comparison =
PIPE_CONTROL_CS_STALL);
struct mi_builder b;
- mi_builder_init(&b, batch);
+ mi_builder_init(&b, &batch->screen->devinfo, batch);
struct iris_address addr =
ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
#ifndef MI_BUILDER_H
#define MI_BUILDER_H
+#include "dev/gen_device_info.h"
#include "genxml/genX_bits.h"
#include "util/bitscan.h"
#include "util/fast_idiv_by_const.h"
#endif
struct mi_builder {
+ const struct gen_device_info *devinfo;
__gen_user_data *user_data;
#if GEN_VERSIONx10 >= 75
};
static inline void
-mi_builder_init(struct mi_builder *b, __gen_user_data *user_data)
+mi_builder_init(struct mi_builder *b,
+ const struct gen_device_info *devinfo,
+ __gen_user_data *user_data)
{
memset(b, 0, sizeof(*b));
+ b->devinfo = devinfo;
b->user_data = user_data;
#if GEN_VERSIONx10 >= 75
 * but experiments show it doesn't work properly, so for now just get over
* the CS prefetch.
*/
- for (uint32_t i = 0; i < 128; i++)
+ for (uint32_t i = 0; i < (b->devinfo->cs_prefetch_size / 4); i++)
mi_builder_emit(b, GENX(MI_NOOP), noop);
}
memset(data_map, 139, DATA_BO_SIZE);
memset(&canary, 139, sizeof(canary));
- mi_builder_init(&b, this);
+ mi_builder_init(&b, &devinfo, this);
}
void *
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t a = 0; a < layer_count; a++) {
const uint32_t layer = base_layer + a;
enum anv_fast_clear_type fast_clear_supported)
{
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
const struct mi_value fast_clear_type =
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
enum anv_fast_clear_type fast_clear_supported)
{
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value fast_clear_type_mem =
mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
#endif
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
if (copy_from_surface_state) {
mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
* regardless of conditional rendering being enabled in primary.
*/
struct mi_builder b;
- mi_builder_init(&b, &primary->batch);
+ mi_builder_init(&b, &primary->device->info, &primary->batch);
mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
mi_imm(UINT64_MAX));
}
instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value count =
mi_mem32(anv_address_add(counter_buffer->address,
counterBufferOffset));
bool indexed)
{
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_store(&b, mi_reg32(GEN7_3DPRIM_VERTEX_COUNT),
mi_mem32(anv_address_add(addr, 0)));
genX(cmd_buffer_flush_state)(cmd_buffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct anv_address count_address =
anv_address_add(count_buffer->address, countBufferOffset);
struct mi_value max =
genX(cmd_buffer_flush_state)(cmd_buffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct anv_address count_address =
anv_address_add(count_buffer->address, countBufferOffset);
struct mi_value max =
genX(cmd_buffer_flush_compute_state)(cmd_buffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value size_x = mi_mem32(anv_address_add(addr, 0));
struct mi_value size_y = mi_mem32(anv_address_add(addr, 4));
{
#if GEN_VERSIONx10 >= 75
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
mi_reg32(ANV_PREDICATE_RESULT_REG));
genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
/* Section 19.4 of the Vulkan 1.1.85 spec says:
*
};
batch.next = batch.start;
- mi_builder_init(&b, &batch);
+ mi_builder_init(&b, &device->info, &batch);
mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
mi_imm(p * pool->pass_size));
anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++)
emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
#if GEN_GEN >= 8
case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++) {
for (uint32_t p = 0; p < pool->n_passes; p++) {
case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
for (uint32_t i = 0; i < queryCount; i++)
emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
struct anv_address query_addr = anv_query_address(pool, query);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
struct anv_address query_addr = anv_query_address(pool, query);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
switch (pipelineStage) {
case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
struct mi_builder b;
- mi_builder_init(&b, &cmd_buffer->batch);
+ mi_builder_init(&b, &cmd_buffer->device->info, &cmd_buffer->batch);
struct mi_value result;
/* If render target writes are ongoing, request a render target cache flush