.num_slices = 3,
};
-const struct gen_device_info *
-gen_get_device_info(int devid)
+bool
+gen_get_device_info(int devid, struct gen_device_info *devinfo)
{
- const struct gen_device_info *devinfo;
switch (devid) {
#undef CHIPSET
#define CHIPSET(id, family, name) \
- case id: devinfo = &gen_device_info_##family; break;
+ case id: *devinfo = gen_device_info_##family; break;
#include "pci_ids/i965_pci_ids.h"
default:
fprintf(stderr, "i965_dri.so does not support the 0x%x PCI ID.\n", devid);
- return NULL;
+ return false;
}
- return devinfo;
+ return true;
}
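
(For reference — a minimal sketch of the new calling convention; the caller
below is hypothetical. gen_get_device_info() now fills a caller-owned struct
and reports success, instead of returning a pointer into the static
gen_device_info_* tables:)

   struct gen_device_info devinfo;
   if (!gen_get_device_info(pci_device_id, &devinfo))
      return false;   /* unsupported PCI ID; a message was printed to stderr */
   /* devinfo is a local copy; callers may modify it without touching the tables. */
   fprintf(stderr, "gen%d GPU\n", devinfo.gen);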
const char *
/** @} */
};
-const struct gen_device_info *gen_get_device_info(int devid);
+bool gen_get_device_info(int devid, struct gen_device_info *devinfo);
const char *gen_get_device_name(int devid);
{
bool ok;
+ struct gen_device_info devinfo;
+ t_assert(gen_get_device_info(BDW_GT2_DEVID, &devinfo));
+
struct isl_device dev;
- isl_device_init(&dev, gen_get_device_info(BDW_GT2_DEVID),
- /*bit6_swizzle*/ false);
+ isl_device_init(&dev, &devinfo, /*bit6_swizzle*/ false);
struct isl_surf surf;
ok = isl_surf_init(&dev, &surf,
{
bool ok;
+ struct gen_device_info devinfo;
+ t_assert(gen_get_device_info(BDW_GT2_DEVID, &devinfo));
+
struct isl_device dev;
- isl_device_init(&dev, gen_get_device_info(BDW_GT2_DEVID),
- /*bit6_swizzle*/ false);
+ isl_device_init(&dev, &devinfo, /*bit6_swizzle*/ false);
struct isl_surf surf;
ok = isl_surf_init(&dev, &surf,
{
bool ok;
+ struct gen_device_info devinfo;
+ t_assert(gen_get_device_info(BDW_GT2_DEVID, &devinfo));
+
struct isl_device dev;
- isl_device_init(&dev, gen_get_device_info(BDW_GT2_DEVID),
- /*bit6_swizzle*/ false);
+ isl_device_init(&dev, &devinfo, /*bit6_swizzle*/ false);
struct isl_surf surf;
ok = isl_surf_init(&dev, &surf,
if (gd == NULL)
return NULL;
- gd->devinfo = *gen_get_device_info(pciid);
+ if (!gen_get_device_info(pciid, &gd->devinfo))
+ return NULL;
+
brw_init_compaction_tables(&gd->devinfo);
return gd;
}
device->name = gen_get_device_name(device->chipset_id);
- device->info = gen_get_device_info(device->chipset_id);
- if (!device->info) {
+ if (!gen_get_device_info(device->chipset_id, &device->info)) {
result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
goto fail;
}
- if (device->info->is_haswell) {
+ if (device->info.is_haswell) {
fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
- } else if (device->info->gen == 7 && !device->info->is_baytrail) {
+ } else if (device->info.gen == 7 && !device->info.is_baytrail) {
fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
- } else if (device->info->gen == 7 && device->info->is_baytrail) {
+ } else if (device->info.gen == 7 && device->info.is_baytrail) {
fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
- } else if (device->info->gen >= 8) {
+ } else if (device->info.gen >= 8) {
/* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
 * supported as anything */
} else {
}
device->cmd_parser_version = -1;
- if (device->info->gen == 7) {
+ if (device->info.gen == 7) {
device->cmd_parser_version =
anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
if (device->cmd_parser_version == -1) {
goto fail;
}
- if (!device->info->has_llc &&
+ if (!device->info.has_llc &&
anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
"kernel missing wc mmap");
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
- device->max_vs_threads = device->info->max_vs_threads;
- device->max_hs_threads = device->info->max_hs_threads;
- device->max_ds_threads = device->info->max_ds_threads;
- device->max_gs_threads = device->info->max_gs_threads;
- device->max_wm_threads = device->info->max_wm_threads;
+ device->max_vs_threads = device->info.max_vs_threads;
+ device->max_hs_threads = device->info.max_hs_threads;
+ device->max_ds_threads = device->info.max_ds_threads;
+ device->max_gs_threads = device->info.max_gs_threads;
+ device->max_wm_threads = device->info.max_wm_threads;
/* GENs prior to 8 do not support EU/Subslice info */
- if (device->info->gen >= 8) {
+ if (device->info.gen >= 8) {
device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
" query GPU properties.\n");
}
- } else if (device->info->gen == 7) {
- device->subslice_total = 1 << (device->info->gt - 1);
+ } else if (device->info.gen == 7) {
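+      /* No EU/subslice query on gen7; derive from GT level: GT1 -> 1, GT2 -> 2, GT3 -> 4. */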
+ device->subslice_total = 1 << (device->info.gt - 1);
}
- if (device->info->is_cherryview &&
+ if (device->info.is_cherryview &&
device->subslice_total > 0 && device->eu_total > 0) {
/* Logical CS threads = EUs per subslice * 7 threads per EU */
device->max_cs_threads = device->eu_total / device->subslice_total * 7;
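      /* e.g. (illustrative numbers): 16 EUs over 2 subslices -> 16 / 2 * 7 = 56 threads */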
/* Fuse configurations may give more threads than expected, never less. */
- if (device->max_cs_threads < device->info->max_cs_threads)
- device->max_cs_threads = device->info->max_cs_threads;
+ if (device->max_cs_threads < device->info.max_cs_threads)
+ device->max_cs_threads = device->info.max_cs_threads;
} else {
- device->max_cs_threads = device->info->max_cs_threads;
+ device->max_cs_threads = device->info.max_cs_threads;
}
close(fd);
brw_process_intel_debug_variable();
- device->compiler = brw_compiler_create(NULL, device->info);
+ device->compiler = brw_compiler_create(NULL, &device->info);
if (device->compiler == NULL) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
goto fail;
/* XXX: Actually detect bit6 swizzling */
- isl_device_init(&device->isl_dev, device->info, swizzled);
+ isl_device_init(&device->isl_dev, &device->info, swizzled);
return VK_SUCCESS;
};
static void *
-default_alloc_func(void *pUserData, size_t size, size_t align,
+default_alloc_func(void *pUserData, size_t size, size_t align,
VkSystemAllocationScope allocationScope)
{
return malloc(size);
.alphaToOne = true,
.multiViewport = true,
.samplerAnisotropy = false, /* FINISHME */
- .textureCompressionETC2 = pdevice->info->gen >= 8 ||
- pdevice->info->is_baytrail,
- .textureCompressionASTC_LDR = pdevice->info->gen >= 9, /* FINISHME CHV */
+ .textureCompressionETC2 = pdevice->info.gen >= 8 ||
+ pdevice->info.is_baytrail,
+ .textureCompressionASTC_LDR = pdevice->info.gen >= 9, /* FINISHME CHV */
.textureCompressionBC = true,
.occlusionQueryPrecise = true,
.pipelineStatisticsQuery = false,
VkPhysicalDeviceProperties* pProperties)
{
ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
- const struct gen_device_info *devinfo = pdevice->info;
+ const struct gen_device_info *devinfo = &pdevice->info;
const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
*/
heap_size = 3 * physical_device->aperture_size / 4;
- if (physical_device->info->has_llc) {
+ if (physical_device->info.has_llc) {
/* Big core GPUs share LLC with the CPU and thus one memory type can be
* both cached and coherent at the same time.
*/
return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
}
- anv_set_dispatch_devinfo(physical_device->info);
+ anv_set_dispatch_devinfo(&physical_device->info);
device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
sizeof(*device), 8,
goto fail_fd;
}
- device->info = *physical_device->info;
+ device->info = physical_device->info;
device->isl_dev = physical_device->isl_dev;
/* On Broadwell and later, we can use batch chaining to more efficiently
VkFormat format,
VkFormatProperties *out_properties)
{
- int gen = physical_device->info->gen * 10;
- if (physical_device->info->is_haswell)
+ int gen = physical_device->info.gen * 10;
+ if (physical_device->info.is_haswell)
gen += 5;
VkFormatFeatureFlags linear = 0, tiled = 0, buffer = 0;
/* Nothing to do here */
} else if (vk_format_is_depth_or_stencil(format)) {
tiled |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
- if (physical_device->info->gen >= 8)
+ if (physical_device->info.gen >= 8)
tiled |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
tiled |= VK_FORMAT_FEATURE_BLIT_SRC_BIT |
VK_FORMAT_FEATURE_BLIT_DST_BIT;
} else {
struct anv_format linear_fmt, tiled_fmt;
- linear_fmt = anv_get_format(physical_device->info, format,
+ linear_fmt = anv_get_format(&physical_device->info, format,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_TILING_LINEAR);
- tiled_fmt = anv_get_format(physical_device->info, format,
+ tiled_fmt = anv_get_format(&physical_device->info, format,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_TILING_OPTIMAL);
- linear = get_image_format_properties(physical_device->info,
+ linear = get_image_format_properties(&physical_device->info,
linear_fmt.isl_format, linear_fmt);
- tiled = get_image_format_properties(physical_device->info,
+ tiled = get_image_format_properties(&physical_device->info,
linear_fmt.isl_format, tiled_fmt);
- buffer = get_buffer_format_properties(physical_device->info,
+ buffer = get_buffer_format_properties(&physical_device->info,
linear_fmt.isl_format);
/* XXX: We handle 3-channel formats by switching them out for RGBX or
uint32_t chipset_id;
char path[20];
const char * name;
- const struct gen_device_info * info;
+ struct gen_device_info info;
uint64_t aperture_size;
struct brw_compiler * compiler;
struct isl_device isl_dev;
/* Begin the compilation:
*/
- brw_init_codegen(brw->screen->devinfo, &c.func, mem_ctx);
+ brw_init_codegen(&brw->screen->devinfo, &c.func, mem_ctx);
c.func.single_program_flow = 1;
if (unlikely(INTEL_DEBUG & DEBUG_CLIP)) {
fprintf(stderr, "clip:\n");
- brw_disassemble(brw->screen->devinfo, c.func.store,
+ brw_disassemble(&brw->screen->devinfo, c.func.store,
0, program_size, stderr);
fprintf(stderr, "\n");
}
const uint32_t brw_format = brw_format_for_mesa_format(intel_tex->_Format);
- if (isl_format_supports_lossless_compression(brw->screen->devinfo,
+ if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
brw_format))
return false;
{
struct gl_context *ctx = &brw->ctx;
const struct intel_screen *screen = brw->screen;
- const struct gen_device_info *devinfo = screen->devinfo;
+ const struct gen_device_info *devinfo = &screen->devinfo;
/* FINISHME: Do this for all platforms that the kernel supports */
if (brw->is_cherryview &&
{
struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
struct intel_screen *screen = driContextPriv->driScreenPriv->driverPrivate;
- const struct gen_device_info *devinfo = screen->devinfo;
+ const struct gen_device_info *devinfo = &screen->devinfo;
struct dd_function_table functions;
/* Only allow the __DRI_CTX_FLAG_ROBUST_BUFFER_ACCESS flag if the kernel
prog_data.base.total_shared = prog->Comp.SharedSize;
}
- assign_cs_binding_table_offsets(brw->screen->devinfo, prog,
+ assign_cs_binding_table_offsets(&brw->screen->devinfo, prog,
&cp->program.Base, &prog_data);
/* Allocate the references to the uniforms that will end up in the
/* Begin the compilation:
*/
- brw_init_codegen(brw->screen->devinfo, &c.func, mem_ctx);
+ brw_init_codegen(&brw->screen->devinfo, &c.func, mem_ctx);
c.func.single_program_flow = 1;
if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
fprintf(stderr, "gs:\n");
- brw_disassemble(brw->screen->devinfo, c.func.store,
+ brw_disassemble(&brw->screen->devinfo, c.func.store,
0, program_size, stderr);
fprintf(stderr, "\n");
}
memset(&prog_data, 0, sizeof(prog_data));
- assign_gs_binding_table_offsets(brw->screen->devinfo, prog,
+ assign_gs_binding_table_offsets(&brw->screen->devinfo, prog,
&gp->program.Base, &prog_data);
/* Allocate the references to the uniforms that will end up in the
((1 << gp->program.Base.CullDistanceArraySize) - 1) <<
gp->program.Base.ClipDistanceArraySize;
- brw_compute_vue_map(brw->screen->devinfo,
+ brw_compute_vue_map(&brw->screen->devinfo,
&prog_data.base.vue_map, outputs_written,
prog->SeparateShader);
mem_ctx = ralloc_context(NULL);
/* Begin the compilation:
*/
- brw_init_codegen(brw->screen->devinfo, &c.func, mem_ctx);
+ brw_init_codegen(&brw->screen->devinfo, &c.func, mem_ctx);
c.key = *key;
c.vue_map = brw->vue_map_geom_out;
if (unlikely(INTEL_DEBUG & DEBUG_SF)) {
fprintf(stderr, "sf:\n");
- brw_disassemble(brw->screen->devinfo,
+ brw_disassemble(&brw->screen->devinfo,
c.func.store, 0, program_size, stderr);
fprintf(stderr, "\n");
}
}
fprintf(stderr, "%s:\n", name);
- brw_disassemble(brw->screen->devinfo, brw->cache.bo->virtual,
+ brw_disassemble(&brw->screen->devinfo, brw->cache.bo->virtual,
item->offset, item->size, stderr);
}
}
void
brw_init_surface_formats(struct brw_context *brw)
{
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct gl_context *ctx = &brw->ctx;
int gen;
mesa_format format;
struct brw_tes_prog_key *key)
{
const struct brw_compiler *compiler = brw->screen->compiler;
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
struct brw_stage_state *stage_state = &brw->tes.base;
nir_shader *nir = tep->program.Base.nir;
struct brw_tes_prog_data prog_data;
mem_ctx = ralloc_context(NULL);
brw_assign_common_binding_table_offsets(MESA_SHADER_VERTEX,
- brw->screen->devinfo,
+ &brw->screen->devinfo,
prog, &vp->program.Base,
&prog_data.base.base, 0);
((1 << vp->program.Base.CullDistanceArraySize) - 1) <<
vp->program.Base.ClipDistanceArraySize;
- brw_compute_vue_map(brw->screen->devinfo,
+ brw_compute_vue_map(&brw->screen->devinfo,
&prog_data.base.vue_map, outputs_written,
prog ? prog->SeparateShader ||
prog->_LinkedShaders[MESA_SHADER_TESS_EVAL]
if (!prog)
prog_data.base.use_alt_mode = true;
- assign_fs_binding_table_offsets(brw->screen->devinfo, prog,
+ assign_fs_binding_table_offsets(&brw->screen->devinfo, prog,
&fp->program.Base, key, &prog_data);
/* Allocate the references to the uniforms that will end up in the
surf.dim = get_isl_surf_dim(target);
const enum isl_dim_layout dim_layout =
- get_isl_dim_layout(brw->screen->devinfo, mt->tiling, target);
+ get_isl_dim_layout(&brw->screen->devinfo, mt->tiling, target);
if (surf.dim_layout != dim_layout) {
/* The layout of the specified texture target is not compatible with the
if (!intel_miptree_is_lossless_compressed(brw, mt))
return true;
- if (isl_format_supports_lossless_compression(brw->screen->devinfo,
+ if (isl_format_supports_lossless_compression(&brw->screen->devinfo,
format))
return true;
const uint32_t surf_index = render_target_start + i;
const int flags = (_mesa_geometric_layers(fb) > 0 ?
INTEL_RENDERBUFFER_LAYERED : 0) |
- (brw->draw_aux_buffer_disabled[i] ?
+ (brw->draw_aux_buffer_disabled[i] ?
INTEL_AUX_BUFFER_DISABLED : 0);
if (intel_renderbuffer(fb->_ColorDrawBuffers[i])) {
if (irb) {
const unsigned format = brw->render_target_format[
_mesa_get_render_format(ctx, intel_rb_format(irb))];
- assert(isl_format_supports_sampling(brw->screen->devinfo,
+ assert(isl_format_supports_sampling(&brw->screen->devinfo,
format));
/* Override the target of the texture if the render buffer is a
static uint32_t
get_image_format(struct brw_context *brw, mesa_format format, GLenum access)
{
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
uint32_t hw_format = brw_format_for_mesa_format(format);
if (access == GL_WRITE_ONLY) {
return hw_format;
struct brw_stage_state *stage_state = &brw->cs.base;
struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
brw_emit_buffer_surface_state(
needs_slm |= prog_data && prog_data->total_shared;
}
- return gen_get_default_l3_weights(brw->screen->devinfo,
+ return gen_get_default_l3_weights(&brw->screen->devinfo,
needs_dc, needs_slm);
}
static void
update_urb_size(struct brw_context *brw, const struct gen_l3_config *cfg)
{
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
const unsigned sz = gen_get_l3_config_urb_size(devinfo, cfg);
if (brw->urb.size != sz) {
if (dw > dw_threshold && brw->can_do_pipelined_register_writes) {
const struct gen_l3_config *const cfg =
- gen_get_l3_config(brw->screen->devinfo, w);
+ gen_get_l3_config(&brw->screen->devinfo, w);
setup_l3_config(brw, cfg);
update_urb_size(brw, cfg);
void
gen7_restore_default_l3_config(struct brw_context *brw)
{
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
const struct gen_l3_config *const cfg = gen_get_default_l3_config(devinfo);
if (cfg != brw->l3.config && brw->can_do_pipelined_register_writes) {
gen7_upload_urb(struct brw_context *brw, unsigned vs_size,
bool gs_present, bool tess_present)
{
- const struct gen_device_info *devinfo = brw->screen->devinfo;
+ const struct gen_device_info *devinfo = &brw->screen->devinfo;
const int push_size_kB =
(brw->gen >= 8 || (brw->is_haswell && brw->gt == 3)) ? 32 : 16;
if (brw->gen >= 9) {
mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
const uint32_t brw_format = brw_format_for_mesa_format(linear_format);
- return isl_format_supports_lossless_compression(brw->screen->devinfo,
+ return isl_format_supports_lossless_compression(&brw->screen->devinfo,
brw_format);
} else
return true;
struct isl_surf *surf)
{
surf->dim = get_isl_surf_dim(mt->target);
- surf->dim_layout = get_isl_dim_layout(brw->screen->devinfo,
+ surf->dim_layout = get_isl_dim_layout(&brw->screen->devinfo,
mt->tiling, mt->target);
if (mt->num_samples > 1) {
if (mesaVis->depthBits == 24) {
assert(mesaVis->stencilBits == 8);
- if (screen->devinfo->has_hiz_and_separate_stencil) {
+ if (screen->devinfo.has_hiz_and_separate_stencil) {
rb = intel_create_private_renderbuffer(MESA_FORMAT_Z24_UNORM_X8_UINT,
num_samples);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &rb->Base.Base);
static void
intel_detect_sseu(struct intel_screen *screen)
{
- assert(screen->devinfo->gen >= 8);
+ assert(screen->devinfo.gen >= 8);
int ret;
screen->subslice_total = -1;
static const int gen6_modes[] = {4, 0, -1};
static const int gen4_modes[] = {0, -1};
- if (screen->devinfo->gen >= 9) {
+ if (screen->devinfo.gen >= 9) {
return gen9_modes;
- } else if (screen->devinfo->gen >= 8) {
+ } else if (screen->devinfo.gen >= 8) {
return gen8_modes;
- } else if (screen->devinfo->gen >= 7) {
+ } else if (screen->devinfo.gen >= 7) {
return gen7_modes;
- } else if (screen->devinfo->gen == 6) {
+ } else if (screen->devinfo.gen == 6) {
return gen6_modes;
} else {
return gen4_modes;
static const uint8_t multisample_samples[2] = {4, 8};
struct intel_screen *screen = dri_screen->driverPrivate;
- const struct gen_device_info *devinfo = screen->devinfo;
+ const struct gen_device_info *devinfo = &screen->devinfo;
uint8_t depth_bits[4], stencil_bits[4];
__DRIconfig **configs = NULL;
set_max_gl_versions(struct intel_screen *screen)
{
__DRIscreen *dri_screen = screen->driScrnPriv;
- const bool has_astc = screen->devinfo->gen >= 9;
+ const bool has_astc = screen->devinfo.gen >= 9;
- switch (screen->devinfo->gen) {
+ switch (screen->devinfo.gen) {
case 9:
case 8:
dri_screen->max_gl_core_version = 44;
dri_screen->max_gl_core_version = 33;
dri_screen->max_gl_compat_version = 30;
dri_screen->max_gl_es1_version = 11;
- dri_screen->max_gl_es2_version = screen->devinfo->is_haswell ? 31 : 30;
+ dri_screen->max_gl_es2_version = screen->devinfo.is_haswell ? 31 : 30;
break;
case 6:
dri_screen->max_gl_core_version = 33;
return false;
screen->deviceID = drm_intel_bufmgr_gem_get_devid(screen->bufmgr);
- screen->devinfo = gen_get_device_info(screen->deviceID);
- if (!screen->devinfo)
+ if (!gen_get_device_info(screen->deviceID, &screen->devinfo))
return false;
brw_process_intel_debug_variable();
if (INTEL_DEBUG & DEBUG_BUFMGR)
dri_bufmgr_set_debug(screen->bufmgr, true);
- if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && screen->devinfo->gen < 7) {
+ if ((INTEL_DEBUG & DEBUG_SHADER_TIME) && screen->devinfo.gen < 7) {
fprintf(stderr,
"shader_time debugging requires gen7 (Ivybridge) or better.\n");
INTEL_DEBUG &= ~DEBUG_SHADER_TIME;
screen->hw_has_timestamp = intel_detect_timestamp(screen);
/* GENs prior to 8 do not support EU/Subslice info */
- if (screen->devinfo->gen >= 8) {
+ if (screen->devinfo.gen >= 8) {
intel_detect_sseu(screen);
- } else if (screen->devinfo->gen == 7) {
- screen->subslice_total = 1 << (screen->devinfo->gt - 1);
+ } else if (screen->devinfo.gen == 7) {
+ screen->subslice_total = 1 << (screen->devinfo.gt - 1);
}
const char *force_msaa = getenv("INTEL_FORCE_MSAA");
*
* Don't even try on pre-Gen6, since we don't attempt to use contexts there.
*/
- if (screen->devinfo->gen >= 6) {
+ if (screen->devinfo.gen >= 6) {
struct drm_i915_reset_stats stats;
memset(&stats, 0, sizeof(stats));
* MI_MATH GPR registers, and version 7 in order to use
* MI_LOAD_REGISTER_REG (which all users of MI_MATH use).
*/
- screen->has_mi_math_and_lrr = screen->devinfo->gen >= 8 ||
- (screen->devinfo->is_haswell &&
+ screen->has_mi_math_and_lrr = screen->devinfo.gen >= 8 ||
+ (screen->devinfo.is_haswell &&
screen->cmd_parser_version >= 7);
dri_screen->extensions = !screen->has_context_reset_notification
? screenExtensions : intelRobustScreenExtensions;
screen->compiler = brw_compiler_create(screen,
- screen->devinfo);
+ &screen->devinfo);
screen->compiler->shader_debug_log = shader_debug_log_mesa;
screen->compiler->shader_perf_log = shader_perf_log_mesa;
screen->program_id = 1;
- if (screen->devinfo->has_resource_streamer) {
+ if (screen->devinfo.has_resource_streamer) {
screen->has_resource_streamer =
intel_get_boolean(screen, I915_PARAM_HAS_RESOURCE_STREAMER);
}
struct intel_screen
{
int deviceID;
- const struct gen_device_info *devinfo;
+ struct gen_device_info devinfo;
__DRIscreen *driScrnPriv;
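
(Net effect on the i965 side: intel_screen now embeds the devinfo by value,
filled once by gen_get_device_info() during screen setup, so call sites take
its address. A hypothetical helper illustrating the pattern — not part of the
patch:)

   static inline const struct gen_device_info *
   intel_screen_devinfo(const struct intel_screen *screen)
   {
      /* Embedded copy; no lifetime tied to the static per-family tables. */
      return &screen->devinfo;
   }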