We can have multiple pipe_screens but only one iris_bufmgr per device,
so it is better to store intel_device_info in the shared iris_bufmgr
and save some memory.
Future patches will also make more use of intel_device_info in
iris_bufmgr.
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19650>
INTEL_BATCH_DECODE_FLOATS;
intel_batch_decode_ctx_init(&batch->decoder, &screen->compiler->isa,
- &screen->devinfo,
+ screen->devinfo,
stderr, decode_flags, NULL,
decode_get_bo, decode_get_state_size, batch);
batch->decoder.dynamic_base = IRIS_MEMZONE_DYNAMIC_START;
iris_create_engines_context(struct iris_context *ice, int priority)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
int fd = iris_bufmgr_get_fd(screen->bufmgr);
struct intel_query_engine_info *engines_info = intel_engine_get_info(fd);
{
struct iris_screen *screen = batch->screen;
struct iris_bufmgr *bufmgr = screen->bufmgr;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
u_trace_fini(&batch->trace);
static void
iris_finish_batch(struct iris_batch *batch)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
if (devinfo->ver == 12 && batch->name == IRIS_BATCH_RENDER) {
/* We re-emit constants at the beginning of every batch as a hardware
}
int ret = 0;
- if (!batch->screen->devinfo.no_hw &&
+ if (!batch->screen->devinfo->no_hw &&
intel_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
ret = -errno;
iris_batch_mark_flush_sync(struct iris_batch *batch,
enum iris_domain access)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
if (iris_domain_is_l3_coherent(devinfo, access))
batch->l3_coherent_seqnos[access] = batch->next_seqno - 1;
iris_batch_mark_invalidate_sync(struct iris_batch *batch,
enum iris_domain access)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
for (unsigned i = 0; i < NUM_IRIS_DOMAINS; i++) {
if (i == access)
#define iris_foreach_batch(ice, batch) \
for (struct iris_batch *batch = &ice->batches[0]; \
- batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
+ batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo->ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
++batch)
#endif
iris_init_binder(struct iris_context *ice)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
memset(&ice->state.binder, 0, sizeof(struct iris_binder));
enum isl_format view_format,
enum isl_format surf_format)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
/* The WaSamplerCacheFlushBetweenRedescribedSurfaceReads workaround says:
*
{
struct iris_context *ice = (void *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
enum blorp_batch_flags blorp_flags = iris_blorp_flags_for_batch(batch);
bool is_dest)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
switch (res->aux.usage) {
case ISL_AUX_USAGE_HIZ:
int next_screen_id;
+ struct intel_device_info devinfo;
bool has_llc:1;
bool has_local_mem:1;
bool has_mmap_offset:1;
list_inithead(&bufmgr->zombie_list);
+ bufmgr->devinfo = *devinfo;
+ devinfo = &bufmgr->devinfo;
bufmgr->has_llc = devinfo->has_llc;
bufmgr->has_local_mem = devinfo->has_local_mem;
bufmgr->has_caching_uapi = devinfo->has_caching_uapi;
{
return bufmgr->sys.size;
}
+
+/* Return the intel_device_info owned by the shared bufmgr; the pointer
+ * stays valid for the bufmgr's lifetime, so screens can hold it directly.
+ */
+const struct intel_device_info *
+iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr)
+{
+   return &bufmgr->devinfo;
+}
uint64_t iris_bufmgr_vram_size(struct iris_bufmgr *bufmgr);
uint64_t iris_bufmgr_sram_size(struct iris_bufmgr *bufmgr);
+const struct intel_device_info *iris_bufmgr_get_device_info(struct iris_bufmgr *bufmgr);
#endif /* IRIS_BUFMGR_H */
const union isl_color_value color)
{
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
if (isl_format_has_int_channel(format)) {
perf_debug(&ice->dbg, "Integer fast clear not enabled for %s\n",
union isl_color_value color)
{
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
struct pipe_resource *p_res = (void *) res;
bool color_changed = res->aux.clear_color_unknown ||
struct iris_resource *res = (void *) p_res;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
enum blorp_batch_flags blorp_flags = iris_blorp_flags_for_batch(batch);
if (render_condition_enabled) {
struct pipe_resource *p_res = (void *) res;
struct pipe_context *ctx = (void *) ice;
struct iris_screen *screen = (void *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
if (INTEL_DEBUG(DEBUG_NO_FAST_CLEAR))
return false;
{
struct iris_context *ice = (void *) ctx;
struct iris_screen *screen = (void *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
if (util_format_is_depth_or_stencil(p_res->format)) {
const struct util_format_unpack_description *unpack =
iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
struct iris_screen *screen = (struct iris_screen*)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_context *ice = rzalloc(NULL, struct iris_context);
if (!ice)
/* array length = print length + nul char + 1 extra to verify it's unused */
char renderer[11];
UNUSED int len =
- snprintf(renderer, sizeof(renderer), "iris_%04x", screen->devinfo.pci_device_id);
+ snprintf(renderer, sizeof(renderer), "iris_%04x", screen->devinfo->pci_device_id);
assert(len == sizeof(renderer) - 2);
const struct build_id_note *note =
const struct pipe_draw_info *info)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
const struct brw_compiler *compiler = screen->compiler;
if (ice->state.prim_mode != info->mode) {
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen*)ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_COMPUTE];
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
unsigned usage)
{
struct iris_screen *screen = (struct iris_screen *) pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
uint32_t max_samples = devinfo->ver == 8 ? 8 : 16;
if (sample_count > max_samples ||
* destroyed.
*/
struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
- intel_measure_gather(&screen->measure, &screen->devinfo);
+ intel_measure_gather(&screen->measure, screen->devinfo);
}
void
static int interval = 0;
if (++interval > 10) {
- intel_measure_gather(measure_device, &screen->devinfo);
+ intel_measure_gather(measure_device, screen->devinfo);
interval = 0;
}
}
/* increment frame counter */
intel_measure_frame_transition(p_atomic_inc_return(&measure_device->frame));
- intel_measure_gather(measure_device, &screen->devinfo);
+ intel_measure_gather(measure_device, screen->devinfo);
}
iris_perf_init_vtbl(perf_cfg);
- intel_perf_init_metrics(perf_cfg, &screen->devinfo, screen->fd,
+ intel_perf_init_metrics(perf_cfg, screen->devinfo, screen->fd,
true /* pipeline stats*/,
true /* register snapshots */);
ice,
ice,
screen->bufmgr,
- &screen->devinfo,
+ screen->devinfo,
ice->batches[IRIS_BATCH_RENDER].ctx_id,
screen->fd);
}
iris_perf_init_vtbl(perf_cfg);
- intel_perf_init_metrics(perf_cfg, &screen->devinfo, screen->fd,
+ intel_perf_init_metrics(perf_cfg, screen->devinfo, screen->fd,
true /* pipeline_statistics */,
true /* register snapshots */);
ice,
ice,
screen->bufmgr,
- &screen->devinfo,
+ screen->devinfo,
ice->batches[IRIS_BATCH_RENDER].ctx_id,
screen->fd);
struct iris_bo *bo,
enum iris_domain access)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
const struct brw_compiler *compiler = batch->screen->compiler;
const bool access_via_l3 = iris_domain_is_l3_coherent(devinfo, access);
const struct iris_vs_prog_key *key)
{
return (struct brw_vs_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->vue.base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->vue.base.program_string_id,
key->vue.base.limit_trig_input_range),
/* Don't tell the backend about our clip plane constants, we've
const struct iris_tcs_prog_key *key)
{
return (struct brw_tcs_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->vue.base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->vue.base.program_string_id,
key->vue.base.limit_trig_input_range),
._tes_primitive_mode = key->_tes_primitive_mode,
.input_vertices = key->input_vertices,
const struct iris_tes_prog_key *key)
{
return (struct brw_tes_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->vue.base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->vue.base.program_string_id,
key->vue.base.limit_trig_input_range),
.patch_inputs_read = key->patch_inputs_read,
.inputs_read = key->inputs_read,
const struct iris_gs_prog_key *key)
{
return (struct brw_gs_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->vue.base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->vue.base.program_string_id,
key->vue.base.limit_trig_input_range),
};
}
const struct iris_fs_prog_key *key)
{
return (struct brw_wm_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->base.program_string_id,
key->base.limit_trig_input_range),
.nr_color_regions = key->nr_color_regions,
.flat_shade = key->flat_shade,
const struct iris_cs_prog_key *key)
{
return (struct brw_cs_prog_key) {
- BRW_KEY_INIT(screen->devinfo.ver, key->base.program_string_id,
+ BRW_KEY_INIT(screen->devinfo->ver, key->base.program_string_id,
key->base.limit_trig_input_range),
};
}
struct iris_compiled_shader *shader)
{
const struct brw_compiler *compiler = screen->compiler;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_vs_prog_data *vs_prog_data =
rzalloc(mem_ctx, struct brw_vs_prog_data);
rzalloc(mem_ctx, struct brw_tcs_prog_data);
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
enum brw_param_builtin *system_values = NULL;
unsigned num_system_values = 0;
unsigned num_cbufs = 0;
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct u_upload_mgr *uploader = ice->shaders.uploader_driver;
const struct brw_compiler *compiler = screen->compiler;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
const struct shader_info *tes_info =
iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
enum brw_param_builtin *system_values;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
struct iris_compiled_shader *shader)
{
const struct brw_compiler *compiler = screen->compiler;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_gs_prog_data *gs_prog_data =
rzalloc(mem_ctx, struct brw_gs_prog_data);
rzalloc(mem_ctx, struct brw_wm_prog_data);
struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
enum brw_param_builtin *system_values;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
rzalloc(mem_ctx, struct brw_cs_prog_data);
struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
enum brw_param_builtin *system_values;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_bufmgr *bufmgr = screen->bufmgr;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
unsigned encoded_size = ffs(per_thread_scratch) - 11;
assert(encoded_size < ARRAY_SIZE(ice->shaders.scratch_bos));
unsigned per_thread_scratch)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- ASSERTED const struct intel_device_info *devinfo = &screen->devinfo;
+ ASSERTED const struct intel_device_info *devinfo = screen->devinfo;
assert(devinfo->verx10 >= 125);
bool can_rearrange_varyings =
util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
key.fs = (struct iris_fs_prog_key) {
KEY_INIT(base),
{
struct iris_context *ice = (struct iris_context *)ctx;
struct iris_screen *screen = (struct iris_screen *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
/* Enabling/disabling optional stages requires a URB reconfiguration. */
if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_uncompiled_shader *old_ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
struct iris_uncompiled_shader *new_ish = state;
{
struct iris_screen *screen = (struct iris_screen *)_screen;
struct nir_shader *nir = (struct nir_shader *) nirptr;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
NIR_PASS_V(nir, iris_fix_edge_flags);
const void *key,
const void *assembly)
{
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
u_upload_alloc(uploader, 0, shader->prog_data->program_size, 64,
&shader->assembly.offset, &shader->assembly.res,
enum pipe_control_flags flags,
unsigned offset)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
const unsigned optional_cs_stall =
GFX_VER == 9 && devinfo->gt == 4 ? PIPE_CONTROL_CS_STALL : 0;
struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
iris_check_query_no_flush(struct iris_context *ice, struct iris_query *q)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
calculate_result_on_cpu(devinfo, q);
return iris_get_monitor_result(ctx, q->monitor, wait, result->batch);
struct iris_screen *screen = (void *) ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
- if (unlikely(screen->devinfo.no_hw)) {
+ if (unlikely(screen->devinfo->no_hw)) {
result->u64 = 0;
return true;
}
struct iris_context *ice = (void *) ctx;
struct iris_query *q = (void *) query;
struct iris_batch *batch = &ice->batches[q->batch_idx];
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
struct iris_resource *res = (void *) p_res;
struct iris_bo *query_bo = iris_resource_bo(q->query_state_ref.res);
struct iris_bo *dst_bo = iris_resource_bo(p_res);
bool predicated = !(flags & PIPE_QUERY_WAIT) && !q->stalled;
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
iris_batch_sync_region_start(batch);
q->stalled = true;
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value result;
{
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
struct iris_screen *screen = (void *) ice->ctx.screen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
const nir_shader *nir = ish->nir;
gl_shader_stage stage)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- ASSERTED struct intel_device_info *devinfo = &screen->devinfo;
+ ASSERTED const struct intel_device_info *devinfo = screen->devinfo;
assert(devinfo->ver >= 12);
iris_postdraw_update_resolve_tracking(struct iris_context *ice)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
// XXX: front buffer drawing?
unsigned int num_layers, enum isl_aux_op op,
bool update_clear_depth)
{
- ASSERTED struct intel_device_info *devinfo = &batch->screen->devinfo;
+ ASSERTED const struct intel_device_info *devinfo = batch->screen->devinfo;
assert(iris_resource_level_has_hiz(devinfo, res, level));
assert(op != ISL_AUX_OP_NONE);
enum isl_aux_state aux_state)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- ASSERTED struct intel_device_info *devinfo = &screen->devinfo;
+ ASSERTED const struct intel_device_info *devinfo = screen->devinfo;
num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
unsigned num_levels)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
switch (res->aux.usage) {
case ISL_AUX_USAGE_HIZ:
return ISL_AUX_USAGE_NONE;
const struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_resource *res = (void *) pview->resource;
const unsigned level = res->base.b.target != PIPE_BUFFER ?
uint32_t start_layer, uint32_t num_layers)
{
const struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
enum isl_aux_usage aux_usage =
iris_resource_texture_aux_usage(ice, res, view_format,
bool draw_aux_disabled)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
if (draw_aux_disabled)
return ISL_AUX_USAGE_NONE;
}
static uint64_t
-select_best_modifier(struct intel_device_info *devinfo,
+select_best_modifier(const struct intel_device_info *devinfo,
const struct pipe_resource *templ,
const uint64_t *modifiers,
int count)
int *count)
{
struct iris_screen *screen = (void *) pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
uint64_t all_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
bool *external_only)
{
struct iris_screen *screen = (void *) pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
if (modifier_is_supported(devinfo, pfmt, 0, modifier)) {
if (external_only)
const struct pipe_image_view *img)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
enum isl_format isl_fmt =
PIPE_RESOURCE_FLAG_MAP_PERSISTENT))
flags |= BO_ALLOC_SMEM;
- if (screen->devinfo.verx10 >= 125 && isl_aux_usage_has_ccs(aux_usage)) {
+ if (screen->devinfo->verx10 >= 125 && isl_aux_usage_has_ccs(aux_usage)) {
assert((flags & BO_ALLOC_SMEM) == 0);
flags |= BO_ALLOC_LMEM;
}
* sampler via render surface state objects.
*/
if (isl_surf_usage_is_depth(res->surf.usage) &&
- !iris_sample_with_depth_aux(&screen->devinfo, res))
+ !iris_sample_with_depth_aux(screen->devinfo, res))
return 0;
return screen->isl_dev.ss.clear_color_state_size;
const unsigned aux_offset = res->aux.extra_aux.surf.size_B > 0 ?
res->aux.extra_aux.offset : res->aux.offset;
const enum isl_format format =
- iris_format_for_usage(&screen->devinfo, pfmt, res->surf.usage).fmt;
+ iris_format_for_usage(screen->devinfo, pfmt, res->surf.usage).fmt;
const uint64_t format_bits =
intel_aux_map_format_bits(res->surf.tiling, format, plane);
intel_aux_map_add_mapping(aux_map_ctx, res->bo->address + res->offset,
} else if (templ->usage == PIPE_USAGE_STAGING ||
templ->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR)) {
tiling_flags = ISL_TILING_LINEAR_BIT;
- } else if (!screen->devinfo.has_tiling_uapi &&
+ } else if (!screen->devinfo->has_tiling_uapi &&
(templ->bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED))) {
tiling_flags = ISL_TILING_LINEAR_BIT;
} else if (templ->bind & PIPE_BIND_SCANOUT) {
}
const enum isl_format format =
- iris_format_for_usage(&screen->devinfo, templ->format, usage).fmt;
+ iris_format_for_usage(screen->devinfo, templ->format, usage).fmt;
const struct isl_surf_init_info init_info = {
.dim = target_to_isl_surf_dim(templ->target),
iris_resource_configure_aux(struct iris_screen *screen,
struct iris_resource *res, bool imported)
{
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
const bool has_mcs =
isl_surf_get_mcs_surf(&screen->isl_dev, &res->surf, &res->aux.surf);
int modifiers_count)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_resource *res = iris_alloc_resource(pscreen, templ);
if (!res)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- intel_uuid_compute_device_id((uint8_t *)uuid, &screen->devinfo, PIPE_UUID_SIZE);
+ intel_uuid_compute_device_id((uint8_t *)uuid, screen->devinfo, PIPE_UUID_SIZE);
}
static void
iris_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
intel_uuid_compute_driver_id((uint8_t *)uuid, devinfo, PIPE_UUID_SIZE);
}
iris_get_name(struct pipe_screen *pscreen)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
static char buf[128];
snprintf(buf, sizeof(buf), "Mesa %s", devinfo->name);
iris_get_cl_cts_version(struct pipe_screen *pscreen)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
/* https://www.khronos.org/conformance/adopters/conformant-products/opencl#submission_405 */
if (devinfo->verx10 == 120)
* We should probably never end up here. This is just a fallback to get
* some kind of value in case os_get_available_system_memory fails.
*/
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
/* Once a batch uses more than 75% of the maximum mappable size, we
* assume that there's some fragmentation, and we start doing extra
* flushing, etc. That's the big cliff apps will care about.
iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
switch (param) {
case PIPE_CAP_NPOT_TEXTURES:
case PIPE_CAP_VENDOR_ID:
return 0x8086;
case PIPE_CAP_DEVICE_ID:
- return screen->devinfo.pci_device_id;
+ return screen->devinfo->pci_device_id;
case PIPE_CAP_VIDEO_MEMORY:
return iris_get_video_memory(screen);
case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
void *ret)
{
struct iris_screen *screen = (struct iris_screen *)pscreen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
const uint32_t max_invocations =
MIN2(1024, 32 * devinfo->max_cs_workgroup_threads);
&result))
return 0;
- result = intel_device_info_timebase_scale(&screen->devinfo, result);
+ result = intel_device_info_timebase_scale(screen->devinfo, result);
result &= (1ull << TIMESTAMP_BITS) - 1;
return result;
if (!screen)
return NULL;
- if (!intel_get_device_info_from_fd(fd, &screen->devinfo))
+ struct intel_device_info devinfo;
+ if (!intel_get_device_info_from_fd(fd, &devinfo))
return NULL;
p_atomic_set(&screen->refcount, 1);
- if (screen->devinfo.ver < 8 || screen->devinfo.platform == INTEL_PLATFORM_CHV)
+ if (devinfo.ver < 8 || devinfo.platform == INTEL_PLATFORM_CHV)
return NULL;
/* Here are the i915 features we need for Iris (in chronological order) :
*
* Checking the last feature availability will include all previous ones.
*/
- if (!screen->devinfo.has_context_isolation) {
+ if (!devinfo.has_context_isolation) {
debug_error("Kernel is too old (4.16+ required) or unusable for Iris.\n"
"Check your dmesg logs for loading failures.\n");
return NULL;
brw_process_intel_debug_variable();
- screen->bufmgr = iris_bufmgr_get_for_fd(&screen->devinfo, fd, bo_reuse);
+ screen->bufmgr = iris_bufmgr_get_for_fd(&devinfo, fd, bo_reuse);
if (!screen->bufmgr)
return NULL;
+ screen->devinfo = iris_bufmgr_get_device_info(screen->bufmgr);
screen->fd = iris_bufmgr_get_fd(screen->bufmgr);
screen->winsys_fd = os_dupfd_cloexec(fd);
screen->precompile = debug_get_bool_option("shader_precompile", true);
- isl_device_init(&screen->isl_dev, &screen->devinfo);
+ isl_device_init(&screen->isl_dev, screen->devinfo);
- screen->compiler = brw_compiler_create(screen, &screen->devinfo);
+ screen->compiler = brw_compiler_create(screen, screen->devinfo);
screen->compiler->shader_debug_log = iris_shader_debug_log;
screen->compiler->shader_perf_log = iris_shader_perf_log;
screen->compiler->supports_shader_constants = true;
- screen->compiler->indirect_ubos_use_sampler = screen->devinfo.ver < 12;
+ screen->compiler->indirect_ubos_use_sampler = screen->devinfo->ver < 12;
- screen->l3_config_3d = iris_get_default_l3_config(&screen->devinfo, false);
- screen->l3_config_cs = iris_get_default_l3_config(&screen->devinfo, true);
+ screen->l3_config_3d = iris_get_default_l3_config(screen->devinfo, false);
+ screen->l3_config_cs = iris_get_default_l3_config(screen->devinfo, true);
iris_disk_cache_init(screen);
pscreen->get_driver_query_info = iris_get_monitor_info;
iris_init_screen_program_functions(pscreen);
- genX_call(&screen->devinfo, init_screen_state, screen);
+ genX_call(screen->devinfo, init_screen_state, screen);
glsl_type_singleton_init_or_ref();
*/
uint64_t last_seqno;
- struct intel_device_info devinfo;
+ const struct intel_device_info *devinfo;
struct isl_device isl_dev;
struct iris_bufmgr *bufmgr;
struct brw_compiler *compiler;
uint32_t src)
{
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
mi_store(&b, mi_reg32(dst), mi_reg32(src));
}
uint32_t src)
{
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
mi_store(&b, mi_reg64(dst), mi_reg64(src));
}
uint32_t val)
{
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
mi_store(&b, mi_reg32(reg), mi_imm(val));
}
uint64_t val)
{
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
mi_store(&b, mi_reg64(reg), mi_imm(val));
}
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value src = mi_mem32(ro_bo(bo, offset));
mi_store(&b, mi_reg32(reg), src);
iris_batch_sync_region_end(batch);
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value src = mi_mem64(ro_bo(bo, offset));
mi_store(&b, mi_reg64(reg), src);
iris_batch_sync_region_end(batch);
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
struct mi_value src = mi_reg32(reg);
if (predicated)
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
struct mi_value src = mi_reg64(reg);
if (predicated)
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value dst = mi_mem32(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
struct mi_value src = mi_imm(imm);
mi_store(&b, dst, src);
{
iris_batch_sync_region_start(batch);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value dst = mi_mem64(rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE));
struct mi_value src = mi_imm(imm);
mi_store(&b, dst, src);
static void
upload_pixel_hashing_tables(struct iris_batch *batch)
{
- UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = batch->screen->devinfo;
UNUSED struct iris_context *ice = batch->ice;
assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
static void
iris_alloc_push_constants(struct iris_batch *batch)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
/* For now, we set a static partitioning of the push constant area,
* assuming that all stages could be in use.
static void
iris_init_render_context(struct iris_batch *batch)
{
- UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = batch->screen->devinfo;
iris_batch_sync_region_start(batch);
static void
iris_init_compute_context(struct iris_batch *batch)
{
- UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = batch->screen->devinfo;
iris_batch_sync_region_start(batch);
want_pma_fix(struct iris_context *ice)
{
UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
- UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = screen->devinfo;
const struct brw_wm_prog_data *wm_prog_data = (void *)
ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
const struct pipe_sampler_state *state)
{
UNUSED struct iris_screen *screen = (void *)ctx->screen;
- UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = screen->devinfo;
struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
if (!cso)
const struct pipe_sampler_view *tmpl)
{
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
if (!isv)
const struct pipe_surface *tmpl)
{
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
isl_surf_usage_flags_t usage = 0;
if (tmpl->writable)
{
struct iris_context *ice = (struct iris_context *) ctx;
UNUSED struct iris_screen *screen = (void *) ctx->screen;
- UNUSED const struct intel_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = screen->devinfo;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
unsigned i;
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct isl_device *isl_dev = &screen->isl_dev;
struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
struct iris_resource *zres;
const struct pipe_vertex_element *state)
{
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_vertex_element_state *cso =
malloc(sizeof(struct iris_vertex_element_state));
assert(ice->shaders.urb.size[i] != 0);
}
- intel_get_urb_config(&screen->devinfo,
+ intel_get_urb_config(screen->devinfo,
screen->l3_config_3d,
ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
_iris_pack_command(batch, GENX(3DSTATE_PS), ps_state, ps) {
- intel_set_ps_dispatch_state(&ps, &batch->screen->devinfo,
+ intel_set_ps_dispatch_state(&ps, batch->screen->devinfo,
wm_prog_data, cso_fb->samples);
ps.DispatchGRFStartRegisterForConstantSetupData0 =
* 3dstate_so_buffer_index_0/1/2/3 states to ensure so_buffer_index_* state is
* not combined with other state changes.
*/
- if (intel_device_info_is_dg2(&batch->screen->devinfo)) {
+ if (intel_device_info_is_dg2(batch->screen->devinfo)) {
iris_emit_pipe_control_flush(batch,
"SO pre change stall WA",
PIPE_CONTROL_CS_STALL);
}
/* Wa_16011411144 */
- if (intel_device_info_is_dg2(&batch->screen->devinfo)) {
+ if (intel_device_info_is_dg2(batch->screen->devinfo)) {
iris_emit_pipe_control_flush(batch,
"SO post change stall WA",
PIPE_CONTROL_CS_STALL);
* 2. Send SO_DECL NP state
* 3. Send 3D State SOL with SOL Enabled
*/
- if (intel_device_info_is_dg2(&batch->screen->devinfo))
+ if (intel_device_info_is_dg2(batch->screen->devinfo))
iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
uint32_t *decl_list =
RR_FREE;
vfg.DistributionGranularity = BatchLevelGranularity;
/* Wa_14014890652 */
- if (intel_device_info_is_dg2(&batch->screen->devinfo))
+ if (intel_device_info_is_dg2(batch->screen->devinfo))
vfg.GranularityThresholdDisable = 1;
vfg.ListCutIndexEnable = draw->primitive_restart;
/* 192 vertices for TRILIST_ADJ */
#define _3DPRIM_BASE_VERTEX 0x2440
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
if (indirect && !indirect->count_from_stream_output) {
if (indirect->indirect_draw_count) {
struct iris_state_ref *grid_size = &ice->state.grid_size;
struct iris_bo *bo = iris_resource_bo(grid_size->res);
struct mi_builder b;
- mi_builder_init(&b, &batch->screen->devinfo, batch);
+ mi_builder_init(&b, batch->screen->devinfo, batch);
struct mi_value size_x = mi_mem32(ro_bo(bo, grid_size->offset + 0));
struct mi_value size_y = mi_mem32(ro_bo(bo, grid_size->offset + 4));
struct mi_value size_z = mi_mem32(ro_bo(bo, grid_size->offset + 8));
{
const uint64_t stage_dirty = ice->state.stage_dirty;
struct iris_screen *screen = batch->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_binder *binder = &ice->state.binder;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
struct iris_compiled_shader *shader =
{
const uint64_t stage_dirty = ice->state.stage_dirty;
struct iris_screen *screen = batch->screen;
- const struct intel_device_info *devinfo = &screen->devinfo;
+ const struct intel_device_info *devinfo = screen->devinfo;
struct iris_binder *binder = &ice->state.binder;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
struct iris_uncompiled_shader *ish =
static void
batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
{
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
iris_batch_sync_boundary(batch);
uint32_t offset,
uint64_t imm)
{
- UNUSED const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ UNUSED const struct intel_device_info *devinfo = batch->screen->devinfo;
enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
enum pipe_control_flags non_lri_post_sync_flags =
post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
unsigned width, unsigned height, unsigned scale)
{
#if GFX_VER == 9
- const struct intel_device_info *devinfo = &batch->screen->devinfo;
+ const struct intel_device_info *devinfo = batch->screen->devinfo;
const unsigned slice_hashing[] = {
/* Because all Gfx9 platforms with more than one slice require
* three-way subslice hashing, a single "normal" 16x16 slice hashing
void
genX(init_screen_state)(struct iris_screen *screen)
{
- assert(screen->devinfo.verx10 == GFX_VERx10);
+ assert(screen->devinfo->verx10 == GFX_VERx10);
screen->vtbl.destroy_state = iris_destroy_state;
screen->vtbl.init_render_context = iris_init_render_context;
screen->vtbl.init_compute_context = iris_init_compute_context;
if (ts[idx] == U_TRACE_NO_TIMESTAMP)
return U_TRACE_NO_TIMESTAMP;
- return intel_device_info_timebase_scale(&screen->devinfo, ts[idx]);
+ return intel_device_info_timebase_scale(screen->devinfo, ts[idx]);
}
static void
/* We could be dealing with /dev/dri/card0 or /dev/dri/renderD128 so to get
* a GPU ID we % 128 the minor number.
*/
- intel_ds_device_init(&ice->ds, &screen->devinfo, screen->fd, minor % 128,
+ intel_ds_device_init(&ice->ds, screen->devinfo, screen->fd, minor % 128,
INTEL_DS_API_OPENGL);
u_trace_pipe_context_init(&ice->ds.trace_context, &ice->ctx,
iris_utrace_record_ts,