 * Copyright © 2014 Intel Corporation
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *    Xiang Haihao <haihao.xiang@intel.com>
 *    Zhao Yakui <yakui.zhao@intel.com>
#include "intel_batchbuffer.h"
#include "intel_driver.h"
#include "i965_defines.h"
#include "i965_structs.h"
#include "i965_drv_video.h"
#include "i965_post_processing.h"
#include "i965_render.h"
#include "intel_media.h"
#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_info) || \
                     IS_GEN6((ctx)->intel.device_info) ||     \
                     IS_GEN7((ctx)->intel.device_info) ||     \
                     IS_GEN8((ctx)->intel.device_info))

#define SURFACE_STATE_PADDED_SIZE       SURFACE_STATE_PADDED_SIZE_GEN8
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET            SURFACE_STATE_OFFSET(MAX_PP_SURFACES)
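
/*
 * A sketch of the surface_state_binding_table bo layout implied by the
 * macros above: MAX_PP_SURFACES padded surface-state entries are packed
 * back to back from offset 0, immediately followed by the binding table,
 * whose dword entry i holds SURFACE_STATE_OFFSET(i), i.e. the byte offset
 * of surface state i within this same bo:
 *
 *   [0 .. PADDED_SIZE)                surface state 0
 *   [PADDED_SIZE .. 2 * PADDED_SIZE)  surface state 1
 *   ...
 *   [BINDING_TABLE_OFFSET ...)        binding table (one dword per entry)
 */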
#define GPU_ASM_BLOCK_WIDTH             16
#define GPU_ASM_BLOCK_HEIGHT            8
#define GPU_ASM_X_OFFSET_ALIGNMENT      4

#define VA_STATUS_SUCCESS_1             0xFFFFFFFE
static VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                   const struct i965_surface *src_surface,
                                   const VARectangle *src_rect,
                                   struct i965_surface *dst_surface,
                                   const VARectangle *dst_rect,
static VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                           const struct i965_surface *src_surface,
                                           const VARectangle *src_rect,
                                           struct i965_surface *dst_surface,
                                           const VARectangle *dst_rect,
/* TODO: Modify the shaders and then compile them again.
 * Currently they are derived from the Haswell ones. */
static const uint32_t pp_null_gen8[][4] = {
static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
static struct pp_module pp_modules_gen8[] = {
            "NULL module (for testing)",
            sizeof(pp_null_gen8),
            PP_NV12_LOAD_SAVE_N12,
            pp_nv12_load_save_nv12_gen8,
            sizeof(pp_nv12_load_save_nv12_gen8),
        gen8_pp_plx_avs_initialize,
            PP_NV12_LOAD_SAVE_PL3,
            pp_nv12_load_save_pl3_gen8,
            sizeof(pp_nv12_load_save_pl3_gen8),
        gen8_pp_plx_avs_initialize,
            PP_PL3_LOAD_SAVE_N12,
            pp_pl3_load_save_nv12_gen8,
            sizeof(pp_pl3_load_save_nv12_gen8),
        gen8_pp_plx_avs_initialize,
            PP_PL3_LOAD_SAVE_PL3,
            pp_pl3_load_save_pl3_gen8,
            sizeof(pp_pl3_load_save_pl3_gen8),
        gen8_pp_plx_avs_initialize,
            "NV12 Scaling module",
            pp_nv12_scaling_gen8,
            sizeof(pp_nv12_scaling_gen8),
        gen8_pp_plx_avs_initialize,
            sizeof(pp_nv12_avs_gen8),
        gen8_pp_plx_avs_initialize,
            sizeof(pp_nv12_dndi_gen8),
            sizeof(pp_nv12_dn_gen8),
            PP_NV12_LOAD_SAVE_PA,
            pp_nv12_load_save_pa_gen8,
            sizeof(pp_nv12_load_save_pa_gen8),
        gen8_pp_plx_avs_initialize,
            pp_pl3_load_save_pa_gen8,
            sizeof(pp_pl3_load_save_pa_gen8),
        gen8_pp_plx_avs_initialize,
            PP_PA_LOAD_SAVE_NV12,
            pp_pa_load_save_nv12_gen8,
            sizeof(pp_pa_load_save_nv12_gen8),
        gen8_pp_plx_avs_initialize,
            pp_pa_load_save_pl3_gen8,
            sizeof(pp_pa_load_save_pl3_gen8),
        gen8_pp_plx_avs_initialize,
            pp_pa_load_save_pa_gen8,
            sizeof(pp_pa_load_save_pa_gen8),
        gen8_pp_plx_avs_initialize,
            PP_RGBX_LOAD_SAVE_NV12,
            pp_rgbx_load_save_nv12_gen8,
            sizeof(pp_rgbx_load_save_nv12_gen8),
        gen8_pp_plx_avs_initialize,
            PP_NV12_LOAD_SAVE_RGBX,
            pp_nv12_load_save_rgbx_gen8,
            sizeof(pp_nv12_load_save_rgbx_gen8),
        gen8_pp_plx_avs_initialize,
pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
    if (surface->type == I965_SURFACE_TYPE_IMAGE) {
        struct object_image *obj_image = (struct object_image *)surface->base;
        fourcc = obj_image->image.format.fourcc;
        struct object_surface *obj_surface = (struct object_surface *)surface->base;
        fourcc = obj_surface->fourcc;
gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
    case I915_TILING_NONE:
        ss->ss0.tiled_surface = 0;
        ss->ss0.tile_walk = 0;
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
        ss->ss0.tiled_surface = 1;
        ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
    case I915_TILING_NONE:
        ss->ss2.tiled_surface = 0;
        ss->ss2.tile_walk = 0;
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
        ss->ss2.tiled_surface = 1;
        ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
    struct gen8_surface_state *ss;
    unsigned int swizzle;
    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;
    /* Always set to 1 (align-4 mode), per the B-Spec */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;
    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
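    /* ss8.base_addr above holds only the presumed GPU address of the bo;
     * the relocation below makes the kernel patch in the real address (and
     * mark the bo writable when it is a render target) at execbuffer time. */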
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
    struct gen8_surface_state2 *ss2;
    unsigned int swizzle;
    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));
    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     int *width, int *height, int *pitch, int *offset)
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    const int U = (fourcc == VA_FOURCC_YV12 ||
                   fourcc == VA_FOURCC_YV16 ||
                   fourcc == VA_FOURCC_IMC1) ? 2 : 1;
    const int V = (fourcc == VA_FOURCC_YV12 ||
                   fourcc == VA_FOURCC_YV16 ||
                   fourcc == VA_FOURCC_IMC1) ? 1 : 2;
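    /* For YV12/YV16/IMC1 the V plane is stored before the U plane, so plane
     * index 1 holds V and index 2 holds U; for the other planar formats the
     * order is U then V. U and V pick the right pitches[]/offsets[] entries
     * in the image path below. */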
    int interleaved_uv = fourcc == VA_FOURCC_NV12;
    int packed_yuv = (fourcc == VA_FOURCC_YUY2 || fourcc == VA_FOURCC_UYVY);
    int rgbx_format = (fourcc == VA_FOURCC_RGBA ||
                       fourcc == VA_FOURCC_RGBX ||
                       fourcc == VA_FOURCC_BGRA ||
                       fourcc == VA_FOURCC_BGRX);
    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        width[0] = obj_surface->orig_width;
        height[0] = obj_surface->orig_height;
        pitch[0] = obj_surface->width;
                width[0] = obj_surface->orig_width * 2; /* surface format is R8, so double the width */
                width[0] = obj_surface->orig_width; /* surface format is YCBCR, width is specified in units of pixels */
        } else if (rgbx_format) {
            width[0] = obj_surface->orig_width * 4; /* surface format is R8, so quadruple the width */
        width[1] = obj_surface->cb_cr_width;
        height[1] = obj_surface->cb_cr_height;
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;
        width[2] = obj_surface->cb_cr_width;
        height[2] = obj_surface->cb_cr_height;
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
        obj_image = (struct object_image *)surface->base;
        width[0] = obj_image->image.width;
        height[0] = obj_image->image.height;
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];
            width[0] = obj_image->image.width * 4; /* surface format is R8, so quadruple the width */
        } else if (packed_yuv) {
            width[0] = obj_image->image.width * 2; /* surface format is R8, so double the width */
            width[0] = obj_image->image.width; /* surface format is YCBCR, width is specified in units of pixels */
        } else if (interleaved_uv) {
            width[1] = obj_image->image.width / 2;
            height[1] = obj_image->image.height / 2;
            pitch[1] = obj_image->image.pitches[1];
            offset[1] = obj_image->image.offsets[1];
            width[1] = obj_image->image.width / 2;
            height[1] = obj_image->image.height / 2;
            pitch[1] = obj_image->image.pitches[U];
            offset[1] = obj_image->image.offsets[U];
            width[2] = obj_image->image.width / 2;
            height[2] = obj_image->image.height / 2;
            pitch[2] = obj_image->image.pitches[V];
            offset[2] = obj_image->image.offsets[V];
            if (fourcc == VA_FOURCC_YV16 || fourcc == VA_FOURCC_422H) {
                width[1] = obj_image->image.width / 2;
                height[1] = obj_image->image.height;
                width[2] = obj_image->image.width / 2;
                height[2] = obj_image->image.height;
    gen8_pp_set_surface_state(ctx, pp_context,
                              width[0] / 4, height[0], pitch[0],
                              I965_SURFACEFORMAT_R8_UINT,
        struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
        /* the format is MSB: X-B-G-R */
        pp_static_parameter->grf2.save_avs_rgb_swap = 0;
        if ((fourcc == VA_FOURCC_BGRA) ||
            (fourcc == VA_FOURCC_BGRX)) {
            /* It is stored as MSB: X-R-G-B */
            pp_static_parameter->grf2.save_avs_rgb_swap = 1;
    if (!packed_yuv && !rgbx_format) {
        if (interleaved_uv) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      width[1] / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
            gen8_pp_set_surface_state(ctx, pp_context,
                                      width[1] / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
            gen8_pp_set_surface_state(ctx, pp_context,
                                      width[2] / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
    int format0 = SURFACE_FORMAT_Y8_UNORM;
        format0 = SURFACE_FORMAT_YCRCB_NORMAL;
        format0 = SURFACE_FORMAT_YCRCB_SWAPY;
        struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
        /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
        format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
        pp_static_parameter->grf2.src_avs_rgb_swap = 0;
        if ((fourcc == VA_FOURCC_BGRA) ||
            (fourcc == VA_FOURCC_BGRX)) {
            pp_static_parameter->grf2.src_avs_rgb_swap = 1;
    gen8_pp_set_surface2_state(ctx, pp_context,
                               width[0], height[0], pitch[0],
    if (!packed_yuv && !rgbx_format) {
        if (interleaved_uv) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       width[1], height[1], pitch[1],
                                       SURFACE_FORMAT_R8B8_UNORM, 0,
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       width[1], height[1], pitch[1],
                                       SURFACE_FORMAT_R8_UNORM, 0,
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       width[2], height[2], pitch[2],
                                       SURFACE_FORMAT_R8_UNORM, 0,
pp_null_x_steps(void *private_context)
pp_null_y_steps(void *private_context)
pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                   const struct i965_surface *src_surface,
                   const VARectangle *src_rect,
                   struct i965_surface *dst_surface,
                   const VARectangle *dst_rect,
    /* private function & data */
    pp_context->pp_x_steps = pp_null_x_steps;
    pp_context->pp_y_steps = pp_null_y_steps;
    pp_context->private_context = NULL;
    pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
    dst_surface->flags = src_surface->flags;
    return VA_STATUS_SUCCESS;
static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
    int i, dst_width_adjust;
    /* The x offset of the destination surface must be dword-aligned, so we
     * extend the destination surface at its left edge and mask out the
     * pixels we are not interested in.
    if (dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT) {
        pp_context->block_horizontal_mask_left = 0;
        for (i = dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT; i < GPU_ASM_BLOCK_WIDTH; i++)
            pp_context->block_horizontal_mask_left |= 1 << i;
        pp_context->block_horizontal_mask_left = 0xffff;

    dst_width_adjust = dst_rect->width + dst_rect->x % GPU_ASM_X_OFFSET_ALIGNMENT;
    if (dst_width_adjust % GPU_ASM_BLOCK_WIDTH) {
        pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust % GPU_ASM_BLOCK_WIDTH)) - 1;
        pp_context->block_horizontal_mask_right = 0xffff;

    if (dst_rect->height % GPU_ASM_BLOCK_HEIGHT) {
        pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height % GPU_ASM_BLOCK_HEIGHT)) - 1;
        pp_context->block_vertical_mask_bottom = 0xff;
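    /* A worked example (a sketch): for dst_rect->x == 2, width == 30 and
     * height == 12, the x offset is rounded down to a dword boundary and the
     * 16-bit left mask keeps bits 2..15 (0xfffc), masking off the two
     * extended pixels; dst_width_adjust == 32 is an exact multiple of the
     * 16-pixel block width, so the right mask stays 0xffff; and the bottom
     * mask keeps 12 % 8 == 4 rows (0x0f) of the last 8-row block. */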
gen7_pp_avs_x_steps(void *private_context)
    struct pp_avs_context *pp_avs_context = private_context;
    return pp_avs_context->dest_w / 16;
gen7_pp_avs_y_steps(void *private_context)
    struct pp_avs_context *pp_avs_context = private_context;
    return pp_avs_context->dest_h / 16;
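/* The AVS kernel walks the destination in 16x16 pixel blocks, hence the
 * step counts above; dest_w/dest_h are rounded up to a multiple of 16 when
 * the AVS context is initialized below, so the divisions are exact. */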
gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
    struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
    struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
    pp_inline_parameter->grf7.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
    pp_inline_parameter->grf7.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
    pp_inline_parameter->grf7.constant_0 = 0xffffffff;
    pp_inline_parameter->grf7.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
static void gen7_update_src_surface_uv_offset(VADriverContextP ctx,
                                              struct i965_post_processing_context *pp_context,
                                              const struct i965_surface *surface)
    struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    if (fourcc == VA_FOURCC_YUY2) {
        pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
        pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
        pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
    } else if (fourcc == VA_FOURCC_UYVY) {
        pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
        pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
        pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           const struct i965_surface *src_surface,
                           const VARectangle *src_rect,
                           struct i965_surface *dst_surface,
                           const VARectangle *dst_rect,
    /* TODO: Add the sampler_8x8 state */
    struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
    struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
    struct gen8_sampler_8x8_avs *sampler_8x8;
    struct i965_sampler_8x8_coefficient *sampler_8x8_state;
    int width[3], height[3], pitch[3], offset[3];
    int src_width, src_height;
    unsigned char *cc_ptr;
    memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
                                         width, height, pitch, offset);
    src_height = height[0];
    src_width = width[0];

    /* destination surface */
    gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
                                         width, height, pitch, offset);

    /* sampler 8x8 state */
    dri_bo_map(pp_context->dynamic_state.bo, True);
    assert(pp_context->dynamic_state.bo->virtual);
    cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
             pp_context->sampler_offset;
    /* Currently only one gen8 sampler_8x8 is initialized */
    sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
    memset(sampler_8x8, 0, sizeof(*sampler_8x8));
    sampler_8x8->dw0.gain_factor = 44;
    sampler_8x8->dw0.weak_edge_threshold = 1;
    sampler_8x8->dw0.strong_edge_threshold = 8;
    /* Use the same values as on Ivybridge instead of the defaults:
     * sampler_8x8->dw0.r3x_coefficient = 5;
    sampler_8x8->dw0.r3x_coefficient = 27;
    sampler_8x8->dw0.r3c_coefficient = 5;
    sampler_8x8->dw2.global_noise_estimation = 255;
    sampler_8x8->dw2.non_edge_weight = 1;
    sampler_8x8->dw2.regular_weight = 2;
    sampler_8x8->dw2.strong_edge_weight = 7;
    /* Use the same values as on Ivybridge instead of the defaults:
     * sampler_8x8->dw2.r5x_coefficient = 7;
     * sampler_8x8->dw2.r5cx_coefficient = 7;
     * sampler_8x8->dw2.r5c_coefficient = 7;
    sampler_8x8->dw2.r5x_coefficient = 9;
    sampler_8x8->dw2.r5cx_coefficient = 8;
    sampler_8x8->dw2.r5c_coefficient = 3;
    sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
    sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
    sampler_8x8->dw3.sat_max = 0x1f;
    sampler_8x8->dw3.hue_max = 14;
    /* The 8-tap filter setting (dw153) determines whether the adaptive
     * filter is applied for all channels.
     * If the 8-tap filter is disabled, the adaptive filter must be disabled
     * as well; it may only be enabled when the 8-tap filter is enabled.
    sampler_8x8->dw3.enable_8tap_filter = 3;
    sampler_8x8->dw3.ief4_smooth_enable = 0;
    sampler_8x8->dw4.s3u = 0;
    sampler_8x8->dw4.diamond_margin = 4;
    sampler_8x8->dw4.vy_std_enable = 0;
    sampler_8x8->dw4.umid = 110;
    sampler_8x8->dw4.vmid = 154;
    sampler_8x8->dw5.diamond_dv = 0;
    sampler_8x8->dw5.diamond_th = 35;
    sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
    sampler_8x8->dw5.hs_margin = 3;
    sampler_8x8->dw5.diamond_du = 2;
    sampler_8x8->dw6.y_point1 = 46;
    sampler_8x8->dw6.y_point2 = 47;
    sampler_8x8->dw6.y_point3 = 254;
    sampler_8x8->dw6.y_point4 = 255;
    sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
    sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
    sampler_8x8->dw8.p0l = 46;
    sampler_8x8->dw8.p1l = 216;
    sampler_8x8->dw9.p2l = 236;
    sampler_8x8->dw9.p3l = 236;
    sampler_8x8->dw9.b0l = 133;
    sampler_8x8->dw9.b1l = 130;
    sampler_8x8->dw10.b2l = 130;
    sampler_8x8->dw10.b3l = 130;
    /* s0l = -5 / 256. s2.8 */
    sampler_8x8->dw10.s0l = 1029; /* s0l = 0 */
    sampler_8x8->dw10.y_slope2 = 31; /* y_slope2 = 0 */
    sampler_8x8->dw11.s1l = 0;
    sampler_8x8->dw11.s2l = 0;
    sampler_8x8->dw12.s3l = 0;
    sampler_8x8->dw12.p0u = 46;
    sampler_8x8->dw12.p1u = 66;
    sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
    sampler_8x8->dw13.p2u = 130;
    sampler_8x8->dw13.p3u = 236;
    sampler_8x8->dw13.b0u = 143;
    sampler_8x8->dw13.b1u = 163;
    sampler_8x8->dw14.b2u = 200;
    sampler_8x8->dw14.b3u = 140;
    sampler_8x8->dw14.s0u = 256; /* s0u = 0 */
    sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
    sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
    sampler_8x8_state = sampler_8x8->coefficients;
    for (i = 0; i < 17; i++) {
        memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
        /* for the Y channel, currently ignored */
        sampler_8x8_state->dw0.table_0x_filter_c0 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c1 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c2 = 0x0;
        sampler_8x8_state->dw0.table_0x_filter_c3 =
            intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw1.table_0x_filter_c4 =
            intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw1.table_0x_filter_c5 = 0x0;
        sampler_8x8_state->dw1.table_0x_filter_c6 = 0x0;
        sampler_8x8_state->dw1.table_0x_filter_c7 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c0 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c1 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c2 = 0x0;
        sampler_8x8_state->dw2.table_0y_filter_c3 =
            intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw3.table_0y_filter_c4 =
            intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw3.table_0y_filter_c5 = 0x0;
        sampler_8x8_state->dw3.table_0y_filter_c6 = 0x0;
        sampler_8x8_state->dw3.table_0y_filter_c7 = 0x0;
        /* for the U/V channels, 0.25 */
        sampler_8x8_state->dw4.table_1x_filter_c0 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c1 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c2 = 0x0;
        sampler_8x8_state->dw4.table_1x_filter_c3 =
            intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw5.table_1x_filter_c4 =
            intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw5.table_1x_filter_c5 = 0x0;
        sampler_8x8_state->dw5.table_1x_filter_c6 = 0x0;
        sampler_8x8_state->dw5.table_1x_filter_c7 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c0 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c1 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c2 = 0x0;
        sampler_8x8_state->dw6.table_1y_filter_c3 =
            intel_format_convert(1 - coff, 1, 6, 0);
        sampler_8x8_state->dw7.table_1y_filter_c4 =
            intel_format_convert(coff, 1, 6, 0);
        sampler_8x8_state->dw7.table_1y_filter_c5 = 0x0;
        sampler_8x8_state->dw7.table_1y_filter_c6 = 0x0;
        sampler_8x8_state->dw7.table_1y_filter_c7 = 0x0;
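        /* intel_format_convert() here presumably packs the float into a 1.6
         * fixed-point field (1 integer bit, 6 fraction bits), so c3 = 1 - coff
         * and c4 = coff form a two-tap linear-interpolation kernel for this
         * phase, with all remaining taps left at zero. */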
    sampler_8x8->dw152.default_sharpness_level = 0;
    sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
    sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
    sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
    dri_bo_unmap(pp_context->dynamic_state.bo);

    /* private function & data */
    pp_context->pp_x_steps = gen7_pp_avs_x_steps;
    pp_context->pp_y_steps = gen7_pp_avs_y_steps;
    pp_context->private_context = &pp_context->pp_avs_context;
    pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
    pp_avs_context->dest_x = dst_rect->x;
    pp_avs_context->dest_y = dst_rect->y;
    pp_avs_context->dest_w = ALIGN(dst_rect->width, 16);
    pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
    pp_avs_context->src_w = src_rect->width;
    pp_avs_context->src_h = src_rect->height;
    pp_avs_context->horiz_range = (float)src_rect->width / src_width;
    int dw = (pp_avs_context->src_w - 1) / 16 + 1;
    dw = MAX(dw, dst_rect->width);
    pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
    pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
    pp_static_parameter->grf2.avs_wa_width = src_width;
    pp_static_parameter->grf2.avs_wa_one_div_256_width = (float) 1.0 / (256 * src_width);
    pp_static_parameter->grf2.avs_wa_five_div_256_width = (float) 5.0 / (256 * src_width);
    pp_static_parameter->grf2.alpha = 255;
    pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
    pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
    pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
                                                                   (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
    pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
                                                                     (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
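    /* The vertical scaling step is the normalized source span covered per
     * destination scan line: (src_rect->height / src_height) / dst_rect->height.
     * The frame origins then back-project destination pixel (dest_x, dest_y)
     * onto the normalized source start position. For example, scaling a
     * full-height source from 720 to 1080 lines gives a step of 1.0 / 1080
     * per output line. */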
    gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
    dst_surface->flags = src_surface->flags;
    return VA_STATUS_SUCCESS;
    VADriverContextP ctx,
    struct i965_post_processing_context *pp_context,
    const struct i965_surface *src_surface,
    const VARectangle *src_rect,
    struct i965_surface *dst_surface,
    const VARectangle *dst_rect,
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    unsigned int end_offset;
    struct pp_module *pp_module;
    int static_param_size, inline_param_size;
    dri_bo_unreference(pp_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
    pp_context->surface_state_binding_table.bo = bo;
    pp_context->idrt.num_interface_descriptors = 0;
    pp_context->sampler_size = 2 * 4096;
    bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
              + pp_context->idrt_size;
    dri_bo_unreference(pp_context->dynamic_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
    pp_context->dynamic_state.bo = bo;
    pp_context->dynamic_state.bo_size = bo_size;
    pp_context->dynamic_state.end_offset = 0;

    /* Constant buffer offset */
    pp_context->curbe_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->curbe_offset + pp_context->curbe_size;

    /* Interface descriptor offset */
    pp_context->idrt_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->idrt_offset + pp_context->idrt_size;

    /* Sampler state offset */
    pp_context->sampler_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->sampler_offset + pp_context->sampler_size;

    /* Update the end offset of the dynamic state */
    pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
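    /* Resulting layout of the dynamic_state bo (every section is 64-byte
     * aligned): constant (CURBE) data at curbe_offset, interface descriptors
     * at idrt_offset, then the sampler 8x8 state at sampler_offset. */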
    static_param_size = sizeof(struct gen7_pp_static_parameter);
    inline_param_size = sizeof(struct gen7_pp_inline_parameter);
    memset(pp_context->pp_static_parameter, 0, static_param_size);
    memset(pp_context->pp_inline_parameter, 0, inline_param_size);
    assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
    pp_context->current_pp = pp_index;
    pp_module = &pp_context->pp_modules[pp_index];
    if (pp_module->initialize)
        va_status = pp_module->initialize(ctx, pp_context,
        va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
    calculate_boundary_block_mask(pp_context, dst_rect);
gen8_pp_interface_descriptor_table(VADriverContextP ctx,
                                   struct i965_post_processing_context *pp_context)
    struct gen8_interface_descriptor_data *desc;
    int pp_index = pp_context->current_pp;
    unsigned char *cc_ptr;
    bo = pp_context->dynamic_state.bo;
    assert(bo->virtual);
    cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
    desc = (struct gen8_interface_descriptor_data *) cc_ptr +
           pp_context->idrt.num_interface_descriptors;
    memset(desc, 0, sizeof(*desc));
    desc->desc0.kernel_start_pointer =
        pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
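    /* Pointer encodings: the kernel start pointer is in 64-byte units
     * relative to the instruction base address, while the sampler state and
     * binding table pointers below are in 32-byte units, hence the >> 6 and
     * >> 5 shifts. */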
    desc->desc2.single_program_flow = 1;
    desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
    desc->desc3.sampler_count = 0; /* 1 - 4 samplers used */
    desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
    desc->desc4.binding_table_entry_count = 0;
    desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
    desc->desc5.constant_urb_entry_read_offset = 0;
    desc->desc5.constant_urb_entry_read_length = 6; /* grf 1-6 */
    pp_context->idrt.num_interface_descriptors++;
gen8_pp_upload_constants(VADriverContextP ctx,
                         struct i965_post_processing_context *pp_context)
    unsigned char *constant_buffer;
    assert(sizeof(struct gen7_pp_static_parameter) == 192);
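    /* 192 bytes == 6 GRF registers of 32 bytes each, matching the CURBE
     * read length of 6 (grf 1-6) programmed in the interface descriptor. */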
    param_size = sizeof(struct gen7_pp_static_parameter);
    dri_bo_map(pp_context->dynamic_state.bo, 1);
    assert(pp_context->dynamic_state.bo->virtual);
    constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
                      pp_context->curbe_offset;
    memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
    dri_bo_unmap(pp_context->dynamic_state.bo);
gen8_pp_states_setup(VADriverContextP ctx,
                     struct i965_post_processing_context *pp_context)
    gen8_pp_interface_descriptor_table(ctx, pp_context);
    gen8_pp_upload_constants(ctx, pp_context);
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
gen8_pp_state_base_address(VADriverContextP ctx,
                           struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    BEGIN_BATCH(batch, 16);
    OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
    /* DW1. General state address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* DW4. Surface state address */
    OUT_RELOC(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
    OUT_BATCH(batch, 0);
    /* DW6. Dynamic state address */
    OUT_RELOC(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
              0, 0 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0);

    /* DW8. Indirect object address */
    OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0);

    /* DW10. Instruction base address */
    OUT_RELOC(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0);
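    /* DW12-DW15. General/dynamic/indirect/instruction state buffer sizes,
     * apparently programmed to a large maximum together with the
     * modify-enable bit. */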
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
    ADVANCE_BATCH(batch);
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |   /* URB Entry Allocation Size, in 256-bit units */
              (pp_context->vfe_gpu_state.curbe_allocation_size));  /* CURBE Allocation Size, in 256-bit units */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    BEGIN_BATCH(batch, 6);
    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
gen8_pp_curbe_load(VADriverContextP ctx,
                   struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int param_size = 64;
    if (IS_GEN8(i965->intel.device_info))
        param_size = sizeof(struct gen7_pp_static_parameter);
    BEGIN_BATCH(batch, 4);
    OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, pp_context->curbe_offset);
    ADVANCE_BATCH(batch);
gen8_pp_object_walker(VADriverContextP ctx,
                      struct i965_post_processing_context *pp_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = pp_context->batch;
    int x, x_steps, y, y_steps;
    int param_size, command_length_in_dws, extra_cmd_in_dws;
    dri_bo *command_buffer;
    unsigned int *command_ptr;
    /* GEN8 uses the same inline parameter layout as GEN7 */
    param_size = sizeof(struct gen7_pp_inline_parameter);
    x_steps = pp_context->pp_x_steps(pp_context->private_context);
    y_steps = pp_context->pp_y_steps(pp_context->private_context);
    command_length_in_dws = 6 + (param_size >> 2);
    extra_cmd_in_dws = 2;
    command_buffer = dri_bo_alloc(i965->intel.bufmgr,
                                  "command objects buffer",
                                  (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
    dri_bo_map(command_buffer, 1);
    command_ptr = command_buffer->virtual;
    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
                *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
                memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
                command_ptr += (param_size >> 2);
                *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
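    /* When the total command dword count is even, one padding dword is
     * presumably emitted so that the following MI_BATCH_BUFFER_END leaves
     * the buffer QWord-aligned. */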
    if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
    *command_ptr++ = MI_BATCH_BUFFER_END;
    dri_bo_unmap(command_buffer);
    if (IS_GEN8(i965->intel.device_info)) {
        BEGIN_BATCH(batch, 3);
        OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
        OUT_RELOC(batch, command_buffer,
                  I915_GEM_DOMAIN_COMMAND, 0, 0);
        OUT_BATCH(batch, 0);
        ADVANCE_BATCH(batch);
    dri_bo_unreference(command_buffer);
    /* The batch buffer has to be executed here because MI_BATCH_BUFFER_END
     * will cause control to pass back to the ring buffer
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
    intel_batchbuffer_start_atomic(batch, 0x1000);
gen8_pp_pipeline_setup(VADriverContextP ctx,
                       struct i965_post_processing_context *pp_context)
    struct intel_batchbuffer *batch = pp_context->batch;
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_pp_pipeline_select(ctx, pp_context);
    gen8_pp_state_base_address(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_curbe_load(ctx, pp_context);
    gen8_interface_descriptor_load(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_object_walker(ctx, pp_context);
    intel_batchbuffer_end_atomic(batch);
gen8_post_processing(
    VADriverContextP ctx,
    struct i965_post_processing_context *pp_context,
    const struct i965_surface *src_surface,
    const VARectangle *src_rect,
    struct i965_surface *dst_surface,
    const VARectangle *dst_rect,
    va_status = gen8_pp_initialize(ctx, pp_context,
    if (va_status == VA_STATUS_SUCCESS) {
        gen8_pp_states_setup(ctx, pp_context);
        gen8_pp_pipeline_setup(ctx, pp_context);
gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
    dri_bo_unreference(pp_context->surface_state_binding_table.bo);
    pp_context->surface_state_binding_table.bo = NULL;
    dri_bo_unreference(pp_context->pp_dndi_context.stmm_bo);
    pp_context->pp_dndi_context.stmm_bo = NULL;
    dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
    pp_context->pp_dn_context.stmm_bo = NULL;
    if (pp_context->instruction_state.bo) {
        dri_bo_unreference(pp_context->instruction_state.bo);
        pp_context->instruction_state.bo = NULL;
    if (pp_context->indirect_state.bo) {
        dri_bo_unreference(pp_context->indirect_state.bo);
        pp_context->indirect_state.bo = NULL;
    if (pp_context->dynamic_state.bo) {
        dri_bo_unreference(pp_context->dynamic_state.bo);
        pp_context->dynamic_state.bo = NULL;
    free(pp_context->pp_static_parameter);
    free(pp_context->pp_inline_parameter);
    pp_context->pp_static_parameter = NULL;
    pp_context->pp_inline_parameter = NULL;
#define VPP_CURBE_ALLOCATION_SIZE 32
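/* In 256-bit (32-byte) units, so 32 * 32 = 1024 bytes of CURBE space --
 * comfortably above the 256-byte curbe_size programmed at the end of this
 * file. */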
gen8_post_processing_context_init(VADriverContextP ctx,
                                  struct intel_batchbuffer *batch)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    unsigned int kernel_offset, end_offset;
    unsigned char *kernel_ptr;
    struct pp_module *pp_module;
    struct i965_post_processing_context *pp_context = data;
    pp_context->vfe_gpu_state.max_num_threads = 60;
    pp_context->vfe_gpu_state.num_urb_entries = 59;
    pp_context->vfe_gpu_state.gpgpu_mode = 0;
    pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
    pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
    pp_context->intel_post_processing = gen8_post_processing;
    pp_context->finalize = gen8_post_processing_context_finalize;
    assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));
    if (IS_GEN8(i965->intel.device_info))
        memcpy(pp_context->pp_modules, pp_modules_gen8, sizeof(pp_context->pp_modules));
        /* should never get here !!! */
    kernel_size = 4096;
    for (i = 0; i < NUM_PP_MODULES; i++) {
        pp_module = &pp_context->pp_modules[i];
        if (pp_module->kernel.bin && pp_module->kernel.size) {
            kernel_size += pp_module->kernel.size;
    pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
    if (pp_context->instruction_state.bo == NULL) {
        WARN_ONCE("failed to allocate buffer space for the kernel shaders in VPP\n");
    assert(pp_context->instruction_state.bo);
    pp_context->instruction_state.bo_size = kernel_size;
    pp_context->instruction_state.end_offset = 0;
    dri_bo_map(pp_context->instruction_state.bo, 1);
    kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
    for (i = 0; i < NUM_PP_MODULES; i++) {
        pp_module = &pp_context->pp_modules[i];
        kernel_offset = ALIGN(end_offset, 64);
        pp_module->kernel.kernel_offset = kernel_offset;
        if (pp_module->kernel.bin && pp_module->kernel.size) {
            memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
            end_offset = kernel_offset + pp_module->kernel.size;
    pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
    dri_bo_unmap(pp_context->instruction_state.bo);

    /* static & inline parameters */
    if (IS_GEN8(i965->intel.device_info)) {
        pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
        pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
    pp_context->pp_dndi_context.current_out_surface = VA_INVALID_SURFACE;
    pp_context->pp_dndi_context.current_out_obj_surface = NULL;
    pp_context->pp_dndi_context.frame_order = -1;
    pp_context->batch = batch;
    pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
    pp_context->curbe_size = 256;