Simplify some macros
author	Xiang, Haihao <haihao.xiang@intel.com>
Fri, 18 Apr 2014 16:12:39 +0000 (00:12 +0800)
committer	Xiang, Haihao <haihao.xiang@intel.com>
Mon, 26 May 2014 04:06:23 +0000 (12:06 +0800)
The macros can now use the information in intel_device_info directly
instead of checking the PCI id.

Signed-off-by: Xiang, Haihao <haihao.xiang@intel.com>
(cherry picked from commit f1b3f83953cd5f6e39900d98b4858a7cb825dee0)
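
For reference, the new macros at the bottom of this change rely on only a few
fields of struct intel_device_info. Below is a minimal sketch of those fields,
inferred from the macro bodies in src/intel_driver.h and the urb_size use in
src/i965_post_processing.c; the real struct carries additional members, and the
exact types shown here are assumptions.

/* Sketch only, not part of the patch. */
struct intel_device_info {
    int gen;                      /* 5 = Ironlake, 6 = Sandybridge, 7 = Ivybridge/Haswell, 8 = Broadwell */
    unsigned int is_g4x     : 1;  /* G45/GM45 family */
    unsigned int is_haswell : 1;  /* Haswell variants within gen 7 */
    unsigned int urb_size;        /* URB size, read directly by the driver */
};

/* Call sites now pass the device info pointer instead of the PCI id, e.g.
 *
 *     if (IS_GEN8(i965->intel.device_info))   // expands to (device_info->gen == 8)
 *         ...
 */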

Conflicts:
src/gen8_post_processing.c
src/i965_post_processing.c
src/intel_driver.h

17 files changed:
src/gen6_mfc_common.c
src/gen75_picture_process.c
src/gen75_vpp_gpe.c
src/gen75_vpp_vebox.c
src/gen7_mfd.c
src/gen8_post_processing.c
src/gen8_render.c
src/i965_avc_bsd.c
src/i965_avc_ildb.c
src/i965_drv_video.c
src/i965_media.c
src/i965_media_h264.c
src/i965_media_mpeg2.c
src/i965_post_processing.c
src/i965_render.c
src/intel_batchbuffer.c
src/intel_driver.h

src/gen6_mfc_common.c
index 7cf9cc6..33b9d55 100644
@@ -525,7 +525,7 @@ VAStatus intel_mfc_avc_prepare(VADriverContextP ctx,
     int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
     int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
 
-    if (IS_GEN6(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info)) {
        /* On the SNB it should be fixed to 128 for the DMV buffer */
        width_in_mbs = 128;
     }
src/gen75_picture_process.c
index 3c4fc0b..7f09b4f 100644
@@ -86,9 +86,9 @@ gen75_vpp_vebox(VADriverContextP ctx,
      proc_ctx->vpp_vebox_ctx->surface_input_object = proc_ctx->surface_pipeline_input_object;
      proc_ctx->vpp_vebox_ctx->surface_output_object  = proc_ctx->surface_render_output_object;
 
-     if (IS_HASWELL(i965->intel.device_id))
+     if (IS_HASWELL(i965->intel.device_info))
          va_status = gen75_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
-     else if (IS_GEN8(i965->intel.device_id))
+     else if (IS_GEN8(i965->intel.device_info))
          va_status = gen8_vebox_process_picture(ctx, proc_ctx->vpp_vebox_ctx);
  
      return va_status;
src/gen75_vpp_gpe.c
index 637d2bf..2e3b104 100644
@@ -617,9 +617,9 @@ vpp_gpe_process(VADriverContextP ctx,
                   struct vpp_gpe_context * vpp_gpe_ctx)
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
        return gen75_gpe_process(ctx, vpp_gpe_ctx);
-    else if (IS_GEN8(i965->intel.device_id))
+    else if (IS_GEN8(i965->intel.device_info))
        return gen8_gpe_process(ctx, vpp_gpe_ctx);
 
      return VA_STATUS_ERROR_UNIMPLEMENTED;
@@ -657,9 +657,9 @@ vpp_gpe_process_sharpening(VADriverContextP ctx,
      if(vpp_gpe_ctx->is_first_frame){
          vpp_gpe_ctx->sub_shader_sum = 3;
          struct i965_kernel * vpp_kernels;
-         if (IS_HASWELL(i965->intel.device_id))
+         if (IS_HASWELL(i965->intel.device_info))
              vpp_kernels = gen75_vpp_sharpening_kernels;
-         else if (IS_GEN8(i965->intel.device_id))
+         else if (IS_GEN8(i965->intel.device_info))
              vpp_kernels = gen8_vpp_sharpening_kernels;
 
          vpp_gpe_ctx->gpe_load_kernels(ctx,
@@ -882,8 +882,8 @@ vpp_gpe_context_init(VADriverContextP ctx)
     struct vpp_gpe_context  *vpp_gpe_ctx = calloc(1, sizeof(struct vpp_gpe_context));
     struct i965_gpe_context *gpe_ctx = &(vpp_gpe_ctx->gpe_ctx);
 
-    assert(IS_HASWELL(i965->intel.device_id) ||
-           IS_GEN8(i965->intel.device_id));
+    assert(IS_HASWELL(i965->intel.device_info) ||
+           IS_GEN8(i965->intel.device_info));
 
     vpp_gpe_ctx->surface_tmp = VA_INVALID_ID;
     vpp_gpe_ctx->surface_tmp_object = NULL;
@@ -896,7 +896,7 @@ vpp_gpe_context_init(VADriverContextP ctx)
     gpe_ctx->vfe_state.urb_entry_size = 59 - 1;
     gpe_ctx->vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;
  
-    if (IS_HASWELL(i965->intel.device_id)) {
+    if (IS_HASWELL(i965->intel.device_info)) {
         vpp_gpe_ctx->gpe_context_init     = i965_gpe_context_init;
         vpp_gpe_ctx->gpe_context_destroy  = i965_gpe_context_destroy;
         vpp_gpe_ctx->gpe_load_kernels     = i965_gpe_load_kernels;
@@ -907,7 +907,7 @@ vpp_gpe_context_init(VADriverContextP ctx)
         gpe_ctx->idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
         gpe_ctx->idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);
 
-    } else if (IS_GEN8(i965->intel.device_id)) {
+    } else if (IS_GEN8(i965->intel.device_info)) {
         vpp_gpe_ctx->gpe_context_init     = gen8_gpe_context_init;
         vpp_gpe_ctx->gpe_context_destroy  = gen8_gpe_context_destroy;
         vpp_gpe_ctx->gpe_load_kernels     = gen8_gpe_load_kernels;
src/gen75_vpp_vebox.c
index 788a75e..d63729e 100644
@@ -152,7 +152,7 @@ void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_c
     */
     p_table = (unsigned int *)proc_ctx->dndi_state_table.ptr;
 
-     if (IS_HASWELL(i965->intel.device_id))
+     if (IS_HASWELL(i965->intel.device_info))
          *p_table ++ = 0;               // reserved  . w0
 
     *p_table ++ = ( 140 << 24 |    // denoise STAD threshold . w1
@@ -224,7 +224,7 @@ void hsw_veb_dndi_table(VADriverContextP ctx, struct intel_vebox_context *proc_c
                     13 << 6   |  // chr temp diff th
                     7 );         // chr temp diff low
 
-    if (IS_GEN8(i965->intel.device_id))
+    if (IS_GEN8(i965->intel.device_info))
         *p_table ++ = 0;         // parameters for hot pixel, 
 }
 
src/gen7_mfd.c
index e91cfd3..f4ccb12 100755
@@ -942,7 +942,7 @@ gen7_mfd_mpeg2_pic_state(VADriverContextP ctx,
     assert(decode_state->pic_param && decode_state->pic_param->buffer);
     pic_param = (VAPictureParameterBufferMPEG2 *)decode_state->pic_param->buffer;
 
-    if (IS_HASWELL(i965->intel.device_id)) {
+    if (IS_HASWELL(i965->intel.device_info)) {
         /* XXX: disable concealment for now */
         slice_concealment_disable_bit = 1;
     }
@@ -1086,7 +1086,7 @@ gen7_mfd_mpeg2_bsd_object(VADriverContextP ctx,
                   (slice_param->macroblock_offset & 0x7));
     OUT_BCS_BATCH(batch,
                   (slice_param->quantiser_scale_code << 24) |
-                  (IS_HASWELL(i965->intel.device_id) ? (vpos1 << 8 | hpos1) : 0));
+                  (IS_HASWELL(i965->intel.device_info) ? (vpos1 << 8 | hpos1) : 0));
     ADVANCE_BCS_BATCH(batch);
 }
 
src/gen8_post_processing.c
index f076830..82bf3b9 100644
 #include "i965_render.h"
 #include "intel_media.h"
 
-#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_id) ||     \
-                     IS_GEN6((ctx)->intel.device_id) ||         \
-                     IS_GEN7((ctx)->intel.device_id) ||         \
-                     IS_GEN8((ctx)->intel.device_id))
+#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_info) ||     \
+                     IS_GEN6((ctx)->intel.device_info) ||         \
+                     IS_GEN7((ctx)->intel.device_info) ||         \
+                     IS_GEN8((ctx)->intel.device_info))
 
 
 #define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8
@@ -1240,7 +1240,7 @@ gen8_pp_curbe_load(VADriverContextP ctx,
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     int param_size = 64;
 
-    if (IS_GEN8(i965->intel.device_id))
+    if (IS_GEN8(i965->intel.device_info))
         param_size = sizeof(struct gen7_pp_static_parameter);
 
     BEGIN_BATCH(batch, 4);
@@ -1264,7 +1264,7 @@ gen8_pp_object_walker(VADriverContextP ctx,
     unsigned int *command_ptr;
 
     param_size = sizeof(struct gen7_pp_inline_parameter);
-    if (IS_GEN8(i965->intel.device_id))
+    if (IS_GEN8(i965->intel.device_info))
         param_size = sizeof(struct gen7_pp_inline_parameter);
 
     x_steps = pp_context->pp_x_steps(pp_context->private_context);
@@ -1306,7 +1306,7 @@ gen8_pp_object_walker(VADriverContextP ctx,
 
     dri_bo_unmap(command_buffer);
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
        BEGIN_BATCH(batch, 3);
        OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
        OUT_RELOC(batch, command_buffer,
@@ -1431,7 +1431,7 @@ gen8_post_processing_context_init(VADriverContextP ctx,
 
     assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));
 
-    if (IS_GEN8(i965->intel.device_id))
+    if (IS_GEN8(i965->intel.device_info))
         memcpy(pp_context->pp_modules, pp_modules_gen8, sizeof(pp_context->pp_modules));
     else {
         /* should never get here !!! */
@@ -1485,7 +1485,7 @@ gen8_post_processing_context_init(VADriverContextP ctx,
     dri_bo_unmap(pp_context->instruction_state.bo);
 
     /* static & inline parameters */
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
         pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
         pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
     }
src/gen8_render.c
index 8f5feaa..54f6225 100644
@@ -1734,7 +1734,7 @@ gen8_render_init(VADriverContextP ctx)
     render_state->render_put_surface = gen8_render_put_surface;
     render_state->render_put_subpicture = gen8_render_put_subpicture;
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
         memcpy(render_state->render_kernels, render_kernels_gen8,
                        sizeof(render_state->render_kernels));
     }
src/i965_avc_bsd.c
index 72b8307..67c7c95 100644
@@ -793,7 +793,7 @@ i965_avc_bsd_object(VADriverContextP ctx,
 {
     struct i965_driver_data *i965 = i965_driver_data(ctx);
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         ironlake_avc_bsd_object(ctx, decode_state, pic_param, slice_param, slice_index, i965_h264_context);
     else
         g4x_avc_bsd_object(ctx, decode_state, pic_param, slice_param, slice_index, i965_h264_context);
src/i965_avc_ildb.c
index e0cc743..d414c90 100644
@@ -342,7 +342,7 @@ i965_avc_ildb_upload_constants(VADriverContextP ctx,
     assert(avc_ildb_context->curbe.bo->virtual);
     root_input = avc_ildb_context->curbe.bo->virtual;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         root_input->max_concurrent_threads = 76; /* 72 - 2 + 8 - 2 */
     } else {
         root_input->max_concurrent_threads = 54; /* 50 - 2 + 8 - 2 */
@@ -427,7 +427,7 @@ i965_avc_ildb_state_base_address(VADriverContextP ctx, struct i965_h264_context
     struct i965_driver_data *i965 = i965_driver_data(ctx); 
     struct intel_batchbuffer *batch = i965_h264_context->batch;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 8);
         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
@@ -614,7 +614,7 @@ i965_avc_ildb_decode_init(VADriverContextP ctx, void *h264_context)
     /* kernel offset */
     assert(NUM_AVC_ILDB_INTERFACES == ARRAY_ELEMS(avc_ildb_kernel_offset_gen5));
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         avc_ildb_kernel_offset = avc_ildb_kernel_offset_gen5;
     } else {
         avc_ildb_kernel_offset = avc_ildb_kernel_offset_gen4;
src/i965_drv_video.c
index db8b2d1..96ca997 100755
@@ -1116,9 +1116,9 @@ i965_guess_surface_format(VADriverContextP ctx,
     if (!obj_config)
         return;
 
-    if (IS_GEN6(i965->intel.device_id) ||
-        IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info) ||
+        IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info)) {
         *fourcc = VA_FOURCC_NV12;
         *is_tiled = 1;
         return;
@@ -1500,7 +1500,7 @@ i965_CreateContext(VADriverContextP ctx,
         render_state->interleaved_uv = 1;
         break;
     default:
-        render_state->interleaved_uv = !!(IS_GEN6(i965->intel.device_id) || IS_GEN7(i965->intel.device_id) || IS_GEN8(i965->intel.device_id));
+        render_state->interleaved_uv = !!(IS_GEN6(i965->intel.device_info) || IS_GEN7(i965->intel.device_info) || IS_GEN8(i965->intel.device_info));
         break;
     }
 
@@ -4213,7 +4213,7 @@ i965_GetSurfaceAttributes(
             attrib_list[i].flags = VA_SURFACE_ATTRIB_GETTABLE | VA_SURFACE_ATTRIB_SETTABLE;
 
             if (attrib_list[i].value.value.i == 0) {
-                if (IS_G4X(i965->intel.device_id)) {
+                if (IS_G4X(i965->intel.device_info)) {
                     if (obj_config->profile == VAProfileMPEG2Simple ||
                         obj_config->profile == VAProfileMPEG2Main) {
                         attrib_list[i].value.value.i = VA_FOURCC_I420;
@@ -4221,7 +4221,7 @@ i965_GetSurfaceAttributes(
                         assert(0);
                         attrib_list[i].flags = VA_SURFACE_ATTRIB_NOT_SUPPORTED;
                     }
-                } else if (IS_IRONLAKE(i965->intel.device_id)) {
+                } else if (IS_IRONLAKE(i965->intel.device_info)) {
                     if (obj_config->profile == VAProfileMPEG2Simple ||
                         obj_config->profile == VAProfileMPEG2Main) {
                         attrib_list[i].value.value.i = VA_FOURCC_I420;
@@ -4235,17 +4235,17 @@ i965_GetSurfaceAttributes(
                         assert(0);
                         attrib_list[i].flags = VA_SURFACE_ATTRIB_NOT_SUPPORTED;
                     }
-                } else if (IS_GEN6(i965->intel.device_id)) {
+                } else if (IS_GEN6(i965->intel.device_info)) {
                     attrib_list[i].value.value.i = VA_FOURCC_NV12;
-                } else if (IS_GEN7(i965->intel.device_id) ||
-                           IS_GEN8(i965->intel.device_id)) {
+                } else if (IS_GEN7(i965->intel.device_info) ||
+                           IS_GEN8(i965->intel.device_info)) {
                     if (obj_config->profile == VAProfileJPEGBaseline)
                         attrib_list[i].value.value.i = 0; /* internal format */
                     else
                         attrib_list[i].value.value.i = VA_FOURCC_NV12;
                 }
             } else {
-                if (IS_G4X(i965->intel.device_id)) {
+                if (IS_G4X(i965->intel.device_info)) {
                     if (obj_config->profile == VAProfileMPEG2Simple ||
                         obj_config->profile == VAProfileMPEG2Main) {
                         if (attrib_list[i].value.value.i != VA_FOURCC_I420) {
@@ -4256,7 +4256,7 @@ i965_GetSurfaceAttributes(
                         assert(0);
                         attrib_list[i].flags = VA_SURFACE_ATTRIB_NOT_SUPPORTED;
                     }
-                } else if (IS_IRONLAKE(i965->intel.device_id)) {
+                } else if (IS_IRONLAKE(i965->intel.device_info)) {
                     if (obj_config->profile == VAProfileMPEG2Simple ||
                         obj_config->profile == VAProfileMPEG2Main) {
                         if (attrib_list[i].value.value.i != VA_FOURCC_I420) {
@@ -4290,7 +4290,7 @@ i965_GetSurfaceAttributes(
                         assert(0);
                         attrib_list[i].flags = VA_SURFACE_ATTRIB_NOT_SUPPORTED;
                     }
-                } else if (IS_GEN6(i965->intel.device_id)) {
+                } else if (IS_GEN6(i965->intel.device_info)) {
                     if (obj_config->entrypoint == VAEntrypointEncSlice ||
                         obj_config->entrypoint == VAEntrypointVideoProc) {
                         switch (attrib_list[i].value.value.i) {
@@ -4314,8 +4314,8 @@ i965_GetSurfaceAttributes(
                             attrib_list[i].flags &= ~VA_SURFACE_ATTRIB_SETTABLE;
                         }
                     }
-                } else if (IS_GEN7(i965->intel.device_id) ||
-                           IS_GEN8(i965->intel.device_id)) {
+                } else if (IS_GEN7(i965->intel.device_info) ||
+                           IS_GEN8(i965->intel.device_info)) {
                     if (obj_config->entrypoint == VAEntrypointEncSlice ||
                         obj_config->entrypoint == VAEntrypointVideoProc) {
                         switch (attrib_list[i].value.value.i) {
@@ -4398,7 +4398,7 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
     if (attribs == NULL)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
 
-    if (IS_G4X(i965->intel.device_id)) {
+    if (IS_G4X(i965->intel.device_info)) {
         if (obj_config->profile == VAProfileMPEG2Simple ||
             obj_config->profile == VAProfileMPEG2Main) {
             attribs[i].type = VASurfaceAttribPixelFormat;
@@ -4407,7 +4407,7 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
             attribs[i].value.value.i = VA_FOURCC_I420;
             i++;
         }
-    } else if (IS_IRONLAKE(i965->intel.device_id)) {
+    } else if (IS_IRONLAKE(i965->intel.device_info)) {
         switch (obj_config->profile) {
         case VAProfileMPEG2Simple:
         case VAProfileMPEG2Main:
@@ -4446,7 +4446,7 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
         default:
             break;
         }
-    } else if (IS_GEN6(i965->intel.device_id)) {
+    } else if (IS_GEN6(i965->intel.device_info)) {
         if (obj_config->entrypoint == VAEntrypointVLD) { /* decode */
             attribs[i].type = VASurfaceAttribPixelFormat;
             attribs[i].value.type = VAGenericValueTypeInteger;
@@ -4493,7 +4493,7 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
                 i++;
             }
         }
-    } else if (IS_GEN7(i965->intel.device_id)) {
+    } else if (IS_GEN7(i965->intel.device_info)) {
         if (obj_config->entrypoint == VAEntrypointVLD) { /* decode */
             if (obj_config->profile == VAProfileJPEGBaseline) {
                 attribs[i].type = VASurfaceAttribPixelFormat;
@@ -4596,7 +4596,7 @@ i965_QuerySurfaceAttributes(VADriverContextP ctx,
                 i++;
             }
         }
-    } else if (IS_GEN8(i965->intel.device_id)) {
+    } else if (IS_GEN8(i965->intel.device_info)) {
         if (obj_config->entrypoint == VAEntrypointVLD) { /* decode */
             if (obj_config->profile == VAProfileJPEGBaseline) {
                 attribs[i].type = VASurfaceAttribPixelFormat;
src/i965_media.c
index 5654109..a13c233 100644
@@ -77,7 +77,7 @@ i965_media_state_base_address(VADriverContextP ctx, struct i965_media_context *m
     struct i965_driver_data *i965 = i965_driver_data(ctx); 
     struct intel_batchbuffer *batch = media_context->base.batch;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 8);
         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
src/i965_media_h264.c
index abfecef..9de4e09 100644
@@ -852,7 +852,7 @@ i965_media_h264_dec_context_init(VADriverContextP ctx, struct i965_media_context
                                     sizeof(h264_avc_kernels_gen5[0])));
     assert(NUM_AVC_MC_INTERFACES == (sizeof(avc_mc_kernel_offset_gen5) /
                                      sizeof(avc_mc_kernel_offset_gen5[0])));
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         memcpy(i965_h264_context->avc_kernels, h264_avc_kernels_gen5, sizeof(i965_h264_context->avc_kernels));
         avc_mc_kernel_offset = avc_mc_kernel_offset_gen5;
         intra_kernel_header = &intra_kernel_header_gen5;
@@ -886,7 +886,7 @@ i965_media_h264_dec_context_init(VADriverContextP ctx, struct i965_media_context
     media_context->free_private_context = i965_media_h264_free_private_context;
 
     /* URB */
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         media_context->urb.num_vfe_entries = 63;
     } else {
         media_context->urb.num_vfe_entries = 23;
src/i965_media_mpeg2.c
index a5c757f..245c8e7 100644
@@ -988,7 +988,7 @@ i965_media_mpeg2_dec_context_init(VADriverContextP ctx, struct i965_media_contex
                                      sizeof(mpeg2_vld_kernels_gen5[0])));
     assert(NUM_MPEG2_VLD_KERNELS <= MAX_INTERFACE_DESC);
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         memcpy(i965_mpeg2_context->vld_kernels, mpeg2_vld_kernels_gen5, sizeof(i965_mpeg2_context->vld_kernels));
     else
         memcpy(i965_mpeg2_context->vld_kernels, mpeg2_vld_kernels_gen4, sizeof(i965_mpeg2_context->vld_kernels));
src/i965_post_processing.c
index 6358033..bdab73a 100755
 #include "i965_render.h"
 #include "intel_media.h"
 
-#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_id) ||     \
-                     IS_GEN6((ctx)->intel.device_id) ||         \
-                     IS_GEN7((ctx)->intel.device_id) ||         \
-                     IS_GEN8((ctx)->intel.device_id))
-
+#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_info) ||     \
+                     IS_GEN6((ctx)->intel.device_info) ||         \
+                     IS_GEN7((ctx)->intel.device_info) ||         \
+                     IS_GEN8((ctx)->intel.device_info))
 
 #define SURFACE_STATE_PADDED_SIZE               MAX(SURFACE_STATE_PADDED_SIZE_GEN8,\
                        MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7))
@@ -1640,7 +1639,7 @@ gen7_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_cont
     ss->ss2.height = height - 1;
     ss->ss3.pitch = pitch - 1;
     gen7_pp_set_surface_tiling(ss, tiling);
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
         gen7_render_set_surface_scs(ss);
     dri_bo_emit_reloc(ss_bo,
                       I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
@@ -2937,7 +2936,7 @@ gen7_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_con
 
     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
     pp_static_parameter->grf2.avs_wa_enable = 1; /* must be set for GEN7 */
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
        pp_static_parameter->grf2.avs_wa_enable = 0; /* HSW don't use the WA */
 
     if (pp_static_parameter->grf2.avs_wa_enable) {
@@ -4110,7 +4109,7 @@ gen6_pp_initialize(
     assert(bo);
     pp_context->vfe_state.bo = bo;
     
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         static_param_size = sizeof(struct gen7_pp_static_parameter);
         inline_param_size = sizeof(struct gen7_pp_inline_parameter);
     } else {
@@ -4166,7 +4165,7 @@ gen6_pp_interface_descriptor_table(VADriverContextP   ctx,
     desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
     desc->desc4.constant_urb_entry_read_offset = 0;
 
-    if (IS_GEN7(i965->intel.device_id))
+    if (IS_GEN7(i965->intel.device_info))
         desc->desc4.constant_urb_entry_read_length = 6; /* grf 1-6 */
     else
         desc->desc4.constant_urb_entry_read_length = 4; /* grf 1-4 */
@@ -4198,8 +4197,8 @@ gen6_pp_upload_constants(VADriverContextP ctx,
     assert(sizeof(struct pp_static_parameter) == 128);
     assert(sizeof(struct gen7_pp_static_parameter) == 192);
 
-    if (IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id))
+    if (IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info))
         param_size = sizeof(struct gen7_pp_static_parameter);
     else
         param_size = sizeof(struct pp_static_parameter);
@@ -4282,8 +4281,8 @@ gen6_pp_curbe_load(VADriverContextP ctx,
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     int param_size;
 
-    if (IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id))
+    if (IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info))
         param_size = sizeof(struct gen7_pp_static_parameter);
     else
         param_size = sizeof(struct pp_static_parameter);
@@ -4369,8 +4368,8 @@ gen6_pp_object_walker(VADriverContextP ctx,
     dri_bo *command_buffer;
     unsigned int *command_ptr;
 
-    if (IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id))
+    if (IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info))
         param_size = sizeof(struct gen7_pp_inline_parameter);
     else
         param_size = sizeof(struct pp_inline_parameter);
@@ -4390,7 +4389,7 @@ gen6_pp_object_walker(VADriverContextP ctx,
         for (x = 0; x < x_steps; x++) {
             if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
                 // some common block parameter update goes here, apply to all pp functions
-                if (IS_GEN6(i965->intel.device_id))
+                if (IS_GEN6(i965->intel.device_info))
                     update_block_mask_parameter (pp_context, x, y, x_steps, y_steps);
                 
                 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
@@ -4412,7 +4411,7 @@ gen6_pp_object_walker(VADriverContextP ctx,
 
     dri_bo_unmap(command_buffer);
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
        BEGIN_BATCH(batch, 3);
        OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
        OUT_RELOC(batch, command_buffer, 
@@ -4570,9 +4569,9 @@ i965_vpp_clear_surface(VADriverContextP ctx,
     br13 |= BR13_8;
     br13 |= pitch;
 
-    if (IS_GEN6(i965->intel.device_id) ||
-        IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info) ||
+        IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info)) {
         intel_batchbuffer_start_atomic_blt(batch, 48);
         BEGIN_BLT_BATCH(batch, 12);
     } else {
@@ -5185,7 +5184,7 @@ i965_post_processing_terminate(VADriverContextP ctx)
     struct i965_post_processing_context *pp_context = i965->pp_context;
 
     if (pp_context) {
-        if (IS_GEN8(i965->intel.device_id)) {
+        if (IS_GEN8(i965->intel.device_info)) {
             gen8_post_processing_context_finalize(pp_context);
         } else {
            i965_post_processing_context_finalize(pp_context);
@@ -5206,12 +5205,12 @@ i965_post_processing_context_init(VADriverContextP ctx,
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     int i;
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
         gen8_post_processing_context_init(ctx, pp_context, batch);
         return;
     };
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
        pp_context->urb.size = i965->intel.device_info->urb_size;
        pp_context->urb.num_vfe_entries = 32;
        pp_context->urb.size_vfe_entry = 1;     /* in 512 bits unit */
@@ -5238,13 +5237,13 @@ i965_post_processing_context_init(VADriverContextP ctx,
     assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen7));
     assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen75));
 
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
         memcpy(pp_context->pp_modules, pp_modules_gen75, sizeof(pp_context->pp_modules));
-    else if (IS_GEN7(i965->intel.device_id))
+    else if (IS_GEN7(i965->intel.device_info))
         memcpy(pp_context->pp_modules, pp_modules_gen7, sizeof(pp_context->pp_modules));
-    else if (IS_GEN6(i965->intel.device_id))
+    else if (IS_GEN6(i965->intel.device_info))
         memcpy(pp_context->pp_modules, pp_modules_gen6, sizeof(pp_context->pp_modules));
-    else if (IS_IRONLAKE(i965->intel.device_id))
+    else if (IS_IRONLAKE(i965->intel.device_info))
         memcpy(pp_context->pp_modules, pp_modules_gen5, sizeof(pp_context->pp_modules));
 
     for (i = 0; i < NUM_PP_MODULES; i++) {
@@ -5263,8 +5262,8 @@ i965_post_processing_context_init(VADriverContextP ctx,
     }
 
     /* static & inline parameters */
-    if (IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info)) {
         pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
         pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
     } else {
src/i965_render.c
index 15643f3..6b0cb19 100644
@@ -341,7 +341,7 @@ i965_render_vs_unit(VADriverContextP ctx)
     vs_state = render_state->vs.state->virtual;
     memset(vs_state, 0, sizeof(*vs_state));
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
     else
         vs_state->thread4.nr_urb_entries = URB_VS_ENTRIES;
@@ -455,7 +455,7 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0; /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -472,7 +472,7 @@ i965_subpic_render_wm_unit(VADriverContextP ctx)
     wm_state->wm4.stats_enable = 0;
     wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5; 
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0;        /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
@@ -519,7 +519,7 @@ i965_render_wm_unit(VADriverContextP ctx)
 
     wm_state->thread1.single_program_flow = 1; /* XXX */
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         wm_state->thread1.binding_table_entry_count = 0;        /* hardware requirement */
     else
         wm_state->thread1.binding_table_entry_count = 7;
@@ -536,7 +536,7 @@ i965_render_wm_unit(VADriverContextP ctx)
     wm_state->wm4.stats_enable = 0;
     wm_state->wm4.sampler_state_pointer = render_state->wm.sampler->offset >> 5; 
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         wm_state->wm4.sampler_count = 0;        /* hardware requirement */
     } else {
         wm_state->wm4.sampler_count = (render_state->wm.sampler_count + 3) / 4;
@@ -828,12 +828,12 @@ i965_render_src_surface_state(
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       region, offset,
                                       w, h,
                                       pitch, format, flags);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_SAMPLER, 0,
@@ -943,12 +943,12 @@ i965_render_dest_surface_state(VADriverContextP ctx, int index)
     assert(ss_bo->virtual);
     ss = (char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index);
 
-    if (IS_GEN7(i965->intel.device_id)) {
+    if (IS_GEN7(i965->intel.device_info)) {
         gen7_render_set_surface_state(ss,
                                       dest_region->bo, 0,
                                       dest_region->width, dest_region->height,
                                       dest_region->pitch, format, 0);
-        if (IS_HASWELL(i965->intel.device_id))
+        if (IS_HASWELL(i965->intel.device_info))
             gen7_render_set_surface_scs(ss);
         dri_bo_emit_reloc(ss_bo,
                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
@@ -1230,7 +1230,7 @@ i965_render_state_base_address(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     struct i965_render_state *render_state = &i965->render_state;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 8);
         OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
         OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
@@ -1394,7 +1394,7 @@ i965_render_vertex_elements(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct intel_batchbuffer *batch = i965->batch;
 
-    if (IS_IRONLAKE(i965->intel.device_id)) {
+    if (IS_IRONLAKE(i965->intel.device_info)) {
         BEGIN_BATCH(batch, 5);
         OUT_BATCH(batch, CMD_VERTEX_ELEMENTS | 3);
         /* offset 0: X,Y -> {X, Y, 1.0, 1.0} */
@@ -1486,7 +1486,7 @@ i965_render_startup(VADriverContextP ctx)
               ((4 * 4) << VB0_BUFFER_PITCH_SHIFT));
     OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 0);
 
-    if (IS_IRONLAKE(i965->intel.device_id))
+    if (IS_IRONLAKE(i965->intel.device_info))
         OUT_RELOC(batch, render_state->vb.vertex_buffer, I915_GEM_DOMAIN_VERTEX, 0, 12 * 4);
     else
         OUT_BATCH(batch, 3);
@@ -1536,9 +1536,9 @@ i965_clear_dest_region(VADriverContextP ctx)
 
     br13 |= pitch;
 
-    if (IS_GEN6(i965->intel.device_id) ||
-        IS_GEN7(i965->intel.device_id) ||
-        IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN6(i965->intel.device_info) ||
+        IS_GEN7(i965->intel.device_info) ||
+        IS_GEN8(i965->intel.device_info)) {
         intel_batchbuffer_start_atomic_blt(batch, 24);
         BEGIN_BLT_BATCH(batch, 6);
     } else {
@@ -2553,7 +2553,7 @@ gen7_emit_urb(VADriverContextP ctx)
     struct intel_batchbuffer *batch = i965->batch;
     unsigned int num_urb_entries = 32;
 
-    if (IS_HASWELL(i965->intel.device_id))
+    if (IS_HASWELL(i965->intel.device_info))
         num_urb_entries = 64;
 
     BEGIN_BATCH(batch, 2);
@@ -2862,7 +2862,7 @@ gen7_emit_wm_state(VADriverContextP ctx, int kernel)
     unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
     unsigned int num_samples = 0;
 
-    if (IS_HASWELL(i965->intel.device_id)) {
+    if (IS_HASWELL(i965->intel.device_info)) {
         max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
         num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
     }
@@ -3146,19 +3146,19 @@ i965_render_init(VADriverContextP ctx)
     assert(NUM_RENDER_KERNEL == (sizeof(render_kernels_gen6) / 
                                  sizeof(render_kernels_gen6[0])));
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
         return gen8_render_init(ctx);
-    } else  if (IS_GEN7(i965->intel.device_id)) {
+    } else  if (IS_GEN7(i965->intel.device_info)) {
         memcpy(render_state->render_kernels,
-               (IS_HASWELL(i965->intel.device_id) ? render_kernels_gen7_haswell : render_kernels_gen7),
+               (IS_HASWELL(i965->intel.device_info) ? render_kernels_gen7_haswell : render_kernels_gen7),
                sizeof(render_state->render_kernels));
         render_state->render_put_surface = gen7_render_put_surface;
         render_state->render_put_subpicture = gen7_render_put_subpicture;
-    } else if (IS_GEN6(i965->intel.device_id)) {
+    } else if (IS_GEN6(i965->intel.device_info)) {
         memcpy(render_state->render_kernels, render_kernels_gen6, sizeof(render_state->render_kernels));
         render_state->render_put_surface = gen6_render_put_surface;
         render_state->render_put_subpicture = gen6_render_put_subpicture;
-    } else if (IS_IRONLAKE(i965->intel.device_id)) {
+    } else if (IS_IRONLAKE(i965->intel.device_info)) {
         memcpy(render_state->render_kernels, render_kernels_gen5, sizeof(render_state->render_kernels));
         render_state->render_put_surface = i965_render_put_surface;
         render_state->render_put_subpicture = i965_render_put_subpicture;
@@ -3197,7 +3197,7 @@ i965_render_terminate(VADriverContextP ctx)
     struct i965_driver_data *i965 = i965_driver_data(ctx);
     struct i965_render_state *render_state = &i965->render_state;
 
-    if (IS_GEN8(i965->intel.device_id)) {
+    if (IS_GEN8(i965->intel.device_info)) {
         gen8_render_terminate(ctx);
         return;
     } 
src/intel_batchbuffer.c
index 9dc496d..c6d3769 100644
@@ -87,7 +87,7 @@ intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size
     batch->flag = flag;
     batch->run = drm_intel_bo_mrb_exec;
 
-    if (IS_GEN6(intel->device_id) &&
+    if (IS_GEN6(intel->device_info) &&
         flag == I915_EXEC_RENDER)
         batch->wa_render_bo = dri_bo_alloc(intel->bufmgr,
                                            "wa scratch",
@@ -183,11 +183,11 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
 {
     struct intel_driver_data *intel = batch->intel; 
 
-    if (IS_GEN6(intel->device_id) ||
-        IS_GEN7(intel->device_id) ||
-        IS_GEN8(intel->device_id)) {
+    if (IS_GEN6(intel->device_info) ||
+        IS_GEN7(intel->device_info) ||
+        IS_GEN8(intel->device_info)) {
         if (batch->flag == I915_EXEC_RENDER) {
-            if (IS_GEN8(intel->device_id)) {
+            if (IS_GEN8(intel->device_info)) {
                 BEGIN_BATCH(batch, 6);
                 OUT_BATCH(batch, CMD_PIPE_CONTROL | (6 - 2));
 
@@ -202,7 +202,7 @@ intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
                 OUT_BATCH(batch, 0); /* write data */
                 OUT_BATCH(batch, 0);
                 ADVANCE_BATCH(batch);
-            } else if (IS_GEN6(intel->device_id)) {
+            } else if (IS_GEN6(intel->device_info)) {
                 assert(batch->wa_render_bo);
 
                 BEGIN_BATCH(batch, 4 * 3);
src/intel_driver.h
index 18bbfe6..8636b21 100644
@@ -174,271 +174,15 @@ struct intel_region
     dri_bo *bo;
 };
 
-#define PCI_CHIP_GM45_GM                0x2A42
-#define PCI_CHIP_IGD_E_G                0x2E02
-#define PCI_CHIP_Q45_G                  0x2E12
-#define PCI_CHIP_G45_G                  0x2E22
-#define PCI_CHIP_G41_G                  0x2E32
-#define PCI_CHIP_B43_G                  0x2E42
-#define PCI_CHIP_B43_G1                 0x2E92
-
-#define PCI_CHIP_IRONLAKE_D_G           0x0042
-#define PCI_CHIP_IRONLAKE_M_G           0x0046
-
-#ifndef PCI_CHIP_SANDYBRIDGE_GT1
-#define PCI_CHIP_SANDYBRIDGE_GT1       0x0102  /* Desktop */
-#define PCI_CHIP_SANDYBRIDGE_GT2       0x0112
-#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS  0x0122
-#define PCI_CHIP_SANDYBRIDGE_M_GT1     0x0106  /* Mobile */
-#define PCI_CHIP_SANDYBRIDGE_M_GT2     0x0116
-#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS        0x0126
-#define PCI_CHIP_SANDYBRIDGE_S_GT      0x010A  /* Server */
-#endif
-
-#define PCI_CHIP_IVYBRIDGE_GT1          0x0152  /* Desktop */
-#define PCI_CHIP_IVYBRIDGE_GT2          0x0162
-#define PCI_CHIP_IVYBRIDGE_M_GT1        0x0156  /* Mobile */
-#define PCI_CHIP_IVYBRIDGE_M_GT2        0x0166
-#define PCI_CHIP_IVYBRIDGE_S_GT1        0x015a  /* Server */
-#define PCI_CHIP_IVYBRIDGE_S_GT2        0x016a
-
-#define PCI_CHIP_HASWELL_GT1            0x0402 /* Desktop */
-#define PCI_CHIP_HASWELL_GT2            0x0412
-#define PCI_CHIP_HASWELL_GT3            0x0422
-#define PCI_CHIP_HASWELL_M_GT1          0x0406 /* Mobile */
-#define PCI_CHIP_HASWELL_M_GT2          0x0416
-#define PCI_CHIP_HASWELL_M_GT3          0x0426
-#define PCI_CHIP_HASWELL_S_GT1          0x040a /* Server */
-#define PCI_CHIP_HASWELL_S_GT2          0x041a
-#define PCI_CHIP_HASWELL_S_GT3          0x042a
-#define PCI_CHIP_HASWELL_B_GT1          0x040b /* Reserved */
-#define PCI_CHIP_HASWELL_B_GT2          0x041b
-#define PCI_CHIP_HASWELL_B_GT3          0x042b
-#define PCI_CHIP_HASWELL_E_GT1          0x040e /* Reserved */
-#define PCI_CHIP_HASWELL_E_GT2          0x041e
-#define PCI_CHIP_HASWELL_E_GT3          0x042e
-
-#define        PCI_CHIP_HASWELL_SDV_GT1                0x0c02 /* Desktop */
-#define        PCI_CHIP_HASWELL_SDV_GT2                0x0c12
-#define        PCI_CHIP_HASWELL_SDV_GT3                0x0c22
-#define        PCI_CHIP_HASWELL_SDV_M_GT1              0x0c06 /* Mobile */
-#define        PCI_CHIP_HASWELL_SDV_M_GT2              0x0c16
-#define        PCI_CHIP_HASWELL_SDV_M_GT3              0x0c26
-#define        PCI_CHIP_HASWELL_SDV_S_GT1              0x0c0a /* Server */
-#define        PCI_CHIP_HASWELL_SDV_S_GT2              0x0c1a
-#define        PCI_CHIP_HASWELL_SDV_S_GT3              0x0c2a
-#define PCI_CHIP_HASWELL_SDV_B_GT1              0x0c0b /* Reserved */
-#define PCI_CHIP_HASWELL_SDV_B_GT2              0x0c1b
-#define PCI_CHIP_HASWELL_SDV_B_GT3              0x0c2b
-#define PCI_CHIP_HASWELL_SDV_E_GT1              0x0c0e /* Reserved */
-#define PCI_CHIP_HASWELL_SDV_E_GT2              0x0c1e
-#define PCI_CHIP_HASWELL_SDV_E_GT3              0x0c2e
-
-#define        PCI_CHIP_HASWELL_ULT_GT1                0x0A02 /* Desktop */
-#define        PCI_CHIP_HASWELL_ULT_GT2                0x0A12
-#define        PCI_CHIP_HASWELL_ULT_GT3                0x0A22
-#define        PCI_CHIP_HASWELL_ULT_M_GT1              0x0A06 /* Mobile */
-#define        PCI_CHIP_HASWELL_ULT_M_GT2              0x0A16
-#define        PCI_CHIP_HASWELL_ULT_M_GT3              0x0A26
-#define        PCI_CHIP_HASWELL_ULT_S_GT1              0x0A0A /* Server */
-#define        PCI_CHIP_HASWELL_ULT_S_GT2              0x0A1A
-#define        PCI_CHIP_HASWELL_ULT_S_GT3              0x0A2A
-#define PCI_CHIP_HASWELL_ULT_B_GT1              0x0A0B /* Reserved */
-#define PCI_CHIP_HASWELL_ULT_B_GT2              0x0A1B
-#define PCI_CHIP_HASWELL_ULT_B_GT3              0x0A2B
-#define PCI_CHIP_HASWELL_ULT_E_GT1              0x0A0E /* Reserved */
-#define PCI_CHIP_HASWELL_ULT_E_GT2              0x0A1E
-#define PCI_CHIP_HASWELL_ULT_E_GT3              0x0A2E
-
-#define        PCI_CHIP_HASWELL_CRW_GT1                0x0D02 /* Desktop */
-#define        PCI_CHIP_HASWELL_CRW_GT2                0x0D12
-#define        PCI_CHIP_HASWELL_CRW_GT3                0x0D22
-#define        PCI_CHIP_HASWELL_CRW_M_GT1              0x0D06 /* Mobile */
-#define        PCI_CHIP_HASWELL_CRW_M_GT2              0x0D16
-#define        PCI_CHIP_HASWELL_CRW_M_GT3              0x0D26
-#define        PCI_CHIP_HASWELL_CRW_S_GT1              0x0D0A /* Server */
-#define        PCI_CHIP_HASWELL_CRW_S_GT2              0x0D1A
-#define        PCI_CHIP_HASWELL_CRW_S_GT3              0x0D2A
-#define PCI_CHIP_HASWELL_CRW_B_GT1              0x0D0B /* Reserved */
-#define PCI_CHIP_HASWELL_CRW_B_GT2              0x0D1B
-#define PCI_CHIP_HASWELL_CRW_B_GT3              0x0D2B
-#define PCI_CHIP_HASWELL_CRW_E_GT1              0x0D0E /* Reserved */
-#define PCI_CHIP_HASWELL_CRW_E_GT2              0x0D1E
-#define PCI_CHIP_HASWELL_CRW_E_GT3              0x0D2E
-
-#define PCI_CHIP_BAYTRAIL_M_1           0x0F31
-#define PCI_CHIP_BAYTRAIL_M_2           0x0F32
-#define PCI_CHIP_BAYTRAIL_M_3           0x0F33
-#define PCI_CHIP_BAYTRAIL_M_4           0x0157
-#define PCI_CHIP_BAYTRAIL_D             0x0155
-
-#define PCI_CHIP_BROADWELL_MS_GT1       0x1602
-#define PCI_CHIP_BROADWELL_MS_GT2       0x1612
-#define PCI_CHIP_BROADWELL_MS_GT2PLUS   0x1622
-
-#define PCI_CHIP_BROADWELL_M_GT1_1      0x1606
-#define PCI_CHIP_BROADWELL_M_GT2_1      0x1616
-#define PCI_CHIP_BROADWELL_M_GT2PLUS_1  0x1626
-
-#define PCI_CHIP_BROADWELL_M_GT1_2      0x160B
-#define PCI_CHIP_BROADWELL_M_GT2_2      0x161B
-#define PCI_CHIP_BROADWELL_M_GT2PLUS_2  0x162B
-
-#define PCI_CHIP_BROADWELL_M_GT1_3      0x160E
-#define PCI_CHIP_BROADWELL_M_GT2_3      0x161E
-#define PCI_CHIP_BROADWELL_M_GT2PLUS_3  0x162E
-
-#define PCI_CHIP_BROADWELL_D_GT1_1      0x160A
-#define PCI_CHIP_BROADWELL_D_GT2_1      0x161A
-#define PCI_CHIP_BROADWELL_D_GT2PLUS_1  0x162A
-
-#define PCI_CHIP_BROADWELL_D_GT1_2      0x160D
-#define PCI_CHIP_BROADWELL_D_GT2_2      0x161D
-#define PCI_CHIP_BROADWELL_D_GT2PLUS_2  0x162D
-
-#define IS_G45(devid)           (devid == PCI_CHIP_IGD_E_G ||   \
-                                 devid == PCI_CHIP_Q45_G ||     \
-                                 devid == PCI_CHIP_G45_G ||     \
-                                 devid == PCI_CHIP_G41_G ||     \
-                                 devid == PCI_CHIP_B43_G ||     \
-                                 devid == PCI_CHIP_B43_G1)
-#define IS_GM45(devid)          (devid == PCI_CHIP_GM45_GM)
-#define IS_G4X(devid)          (IS_G45(devid) || IS_GM45(devid))
-
-#define IS_IRONLAKE_D(devid)    (devid == PCI_CHIP_IRONLAKE_D_G)
-#define IS_IRONLAKE_M(devid)    (devid == PCI_CHIP_IRONLAKE_M_G)
-#define IS_IRONLAKE(devid)      (IS_IRONLAKE_D(devid) || IS_IRONLAKE_M(devid))
-
-#define IS_SNB_GT1(devid)       (devid == PCI_CHIP_SANDYBRIDGE_GT1 ||   \
-                                 devid == PCI_CHIP_SANDYBRIDGE_M_GT1 || \
-                                 devid == PCI_CHIP_SANDYBRIDGE_S_GT)
-
-#define IS_SNB_GT2(devid)       (devid == PCI_CHIP_SANDYBRIDGE_GT2 ||   \
-                                 devid == PCI_CHIP_SANDYBRIDGE_GT2_PLUS || \
-                                 devid == PCI_CHIP_SANDYBRIDGE_M_GT2 || \
-                                 devid == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS)
-
-#define IS_GEN6(devid)          (IS_SNB_GT1(devid) ||   \
-                                 IS_SNB_GT2(devid))
-
-#define IS_BAYTRAIL_M1(devid)    (devid == PCI_CHIP_BAYTRAIL_M_1)
-#define IS_BAYTRAIL_M2(devid)    (devid == PCI_CHIP_BAYTRAIL_M_2)
-#define IS_BAYTRAIL_M3(devid)    (devid == PCI_CHIP_BAYTRAIL_M_3)
-#define IS_BAYTRAIL_D(devid)     (devid == PCI_CHIP_BAYTRAIL_D)
-#define IS_BAYTRAIL(devid)       (IS_BAYTRAIL_M1(devid) || \
-                                  IS_BAYTRAIL_M2(devid) || \
-                                  IS_BAYTRAIL_M3(devid) || \
-                                  IS_BAYTRAIL_D(devid) )
-
-#define IS_IVB_GT1(devid)       (devid == PCI_CHIP_IVYBRIDGE_GT1 ||     \
-                                 devid == PCI_CHIP_IVYBRIDGE_M_GT1 ||   \
-                                 devid == PCI_CHIP_IVYBRIDGE_S_GT1)
-
-#define IS_IVB_GT2(devid)       (devid == PCI_CHIP_IVYBRIDGE_GT2 ||     \
-                                 devid == PCI_CHIP_IVYBRIDGE_M_GT2 ||   \
-                                 devid == PCI_CHIP_IVYBRIDGE_S_GT2)
-
-#define IS_IVYBRIDGE(devid)     (IS_IVB_GT1(devid) ||   \
-                                 IS_IVB_GT2(devid) ||   \
-                                 IS_BAYTRAIL(devid) )
-
-#define IS_HSW_GT1(devid)      (devid == PCI_CHIP_HASWELL_GT1          || \
-                                 devid == PCI_CHIP_HASWELL_M_GT1       || \
-                                 devid == PCI_CHIP_HASWELL_S_GT1       || \
-                                devid == PCI_CHIP_HASWELL_B_GT1        || \
-                                devid == PCI_CHIP_HASWELL_E_GT1        || \
-                                 devid == PCI_CHIP_HASWELL_SDV_GT1     || \
-                                 devid == PCI_CHIP_HASWELL_SDV_M_GT1   || \
-                                 devid == PCI_CHIP_HASWELL_SDV_S_GT1   || \
-                                devid == PCI_CHIP_HASWELL_SDV_B_GT1    || \
-                                devid == PCI_CHIP_HASWELL_SDV_E_GT1    || \
-                                 devid == PCI_CHIP_HASWELL_CRW_GT1     || \
-                                 devid == PCI_CHIP_HASWELL_CRW_M_GT1   || \
-                                 devid == PCI_CHIP_HASWELL_CRW_S_GT1    || \
-                                devid == PCI_CHIP_HASWELL_CRW_B_GT1    || \
-                                devid == PCI_CHIP_HASWELL_CRW_E_GT1    || \
-                                 devid == PCI_CHIP_HASWELL_ULT_GT1     || \
-                                 devid == PCI_CHIP_HASWELL_ULT_M_GT1   || \
-                                 devid == PCI_CHIP_HASWELL_ULT_S_GT1    || \
-                                devid == PCI_CHIP_HASWELL_ULT_B_GT1    || \
-                                devid == PCI_CHIP_HASWELL_ULT_E_GT1)
-
-
-#define IS_HSW_GT2(devid)      (devid == PCI_CHIP_HASWELL_GT2||        \
-                                 devid == PCI_CHIP_HASWELL_M_GT2||      \
-                                 devid == PCI_CHIP_HASWELL_S_GT2||      \
-                                devid == PCI_CHIP_HASWELL_B_GT2 || \
-                                devid == PCI_CHIP_HASWELL_E_GT2 || \
-                                 devid == PCI_CHIP_HASWELL_SDV_GT2||    \
-                                 devid == PCI_CHIP_HASWELL_SDV_M_GT2||  \
-                                 devid == PCI_CHIP_HASWELL_SDV_S_GT2||  \
-                                devid == PCI_CHIP_HASWELL_SDV_B_GT2 || \
-                                devid == PCI_CHIP_HASWELL_SDV_E_GT2 || \
-                                 devid == PCI_CHIP_HASWELL_CRW_GT2||    \
-                                 devid == PCI_CHIP_HASWELL_CRW_M_GT2||  \
-                                 devid == PCI_CHIP_HASWELL_CRW_S_GT2||  \
-                                devid == PCI_CHIP_HASWELL_CRW_B_GT2|| \
-                                devid == PCI_CHIP_HASWELL_CRW_E_GT2|| \
-                                 devid == PCI_CHIP_HASWELL_ULT_GT2||    \
-                                 devid == PCI_CHIP_HASWELL_ULT_M_GT2||  \
-                                 devid == PCI_CHIP_HASWELL_ULT_S_GT2||  \
-                                devid == PCI_CHIP_HASWELL_ULT_B_GT2 || \
-                                devid == PCI_CHIP_HASWELL_ULT_E_GT2)
-
-
-#define IS_HSW_GT3(devid)      (devid == PCI_CHIP_HASWELL_GT3          || \
-                                 devid == PCI_CHIP_HASWELL_M_GT3        || \
-                                 devid == PCI_CHIP_HASWELL_S_GT3        || \
-                                devid == PCI_CHIP_HASWELL_B_GT3        || \
-                                devid == PCI_CHIP_HASWELL_E_GT3        || \
-                                 devid == PCI_CHIP_HASWELL_SDV_GT3      || \
-                                 devid == PCI_CHIP_HASWELL_SDV_M_GT3    || \
-                                 devid == PCI_CHIP_HASWELL_SDV_S_GT3    || \
-                                devid == PCI_CHIP_HASWELL_SDV_B_GT3    || \
-                                devid == PCI_CHIP_HASWELL_SDV_E_GT3    || \
-                                 devid == PCI_CHIP_HASWELL_CRW_GT3      || \
-                                 devid == PCI_CHIP_HASWELL_CRW_M_GT3    || \
-                                 devid == PCI_CHIP_HASWELL_CRW_S_GT3    || \
-                                devid == PCI_CHIP_HASWELL_CRW_B_GT3    || \
-                                devid == PCI_CHIP_HASWELL_CRW_E_GT3    || \
-                                 devid == PCI_CHIP_HASWELL_ULT_GT3      || \
-                                 devid == PCI_CHIP_HASWELL_ULT_M_GT3    || \
-                                 devid == PCI_CHIP_HASWELL_ULT_S_GT3    || \
-                                devid == PCI_CHIP_HASWELL_ULT_B_GT3    || \
-                                devid == PCI_CHIP_HASWELL_ULT_E_GT3)
-
-#define IS_HASWELL(devid)       (IS_HSW_GT1(devid) || \
-                                 IS_HSW_GT2(devid) || \
-                                 IS_HSW_GT3(devid))
-
-#define IS_GEN7(devid)          (IS_IVYBRIDGE(devid) || \
-                                 IS_HASWELL(devid))
-
-
-#define IS_BDW_GT1(devid)       (devid == PCI_CHIP_BROADWELL_M_GT1_1 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT1_2 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT1_3 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT1_1 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT1_2 || \
-                                 devid == PCI_CHIP_BROADWELL_MS_GT1)
-
-#define IS_BDW_GT2(devid)       (devid == PCI_CHIP_BROADWELL_M_GT2_1 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT2_2 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT2_3 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT2_1 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT2_2 || \
-                                 devid == PCI_CHIP_BROADWELL_MS_GT2)
-
-#define IS_BDW_GT2PLUS(devid)   (devid == PCI_CHIP_BROADWELL_M_GT2PLUS_1 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT2PLUS_2 || \
-                                 devid == PCI_CHIP_BROADWELL_M_GT2PLUS_3 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT2PLUS_1 || \
-                                 devid == PCI_CHIP_BROADWELL_D_GT2PLUS_2 || \
-                                 devid == PCI_CHIP_BROADWELL_MS_GT2PLUS)
-
-#define IS_GEN8(devid)          (IS_BDW_GT1(devid) ||   \
-                                 IS_BDW_GT2(devid) ||   \
-                                 IS_BDW_GT2PLUS(devid))
+#define IS_G4X(device_info)             (device_info->is_g4x)
+
+#define IS_IRONLAKE(device_info)        (device_info->gen == 5)
+
+#define IS_GEN6(device_info)            (device_info->gen == 6)
+
+#define IS_HASWELL(device_info)         (device_info->is_haswell)
+#define IS_GEN7(device_info)            (device_info->gen == 7)
+
+#define IS_GEN8(device_info)            (device_info->gen == 8)
 
 #endif /* _INTEL_DRIVER_H_ */