2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Xiang Haihao <haihao.xiang@intel.com>
34 #include <va/va_backend.h>
36 #include "intel_batchbuffer.h"
37 #include "intel_driver.h"
38 #include "i965_defines.h"
39 #include "i965_structs.h"
40 #include "i965_drv_video.h"
41 #include "i965_post_processing.h"
42 #include "i965_render.h"
/* True when the device generation supports the media post-processing
 * pipeline: Ironlake (Gen5), Gen6 or Gen7, keyed off the PCI device id. */
44 #define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_id) || \
45 IS_GEN6((ctx)->intel.device_id) || \
46 IS_GEN7((ctx)->intel.device_id))
/* Gen5 (Ironlake) post-processing kernel binaries.  Each table is filled by
 * #including the assembled .g4b.gen5 shader dump as uint32_t quadruples.
 * NOTE(review): the closing "};" of each table appears elided in this excerpt. */
48 static const uint32_t pp_null_gen5[][4] = {
49 #include "shaders/post_processing/null.g4b.gen5"
/* NV12 -> NV12 copy kernel */
52 static const uint32_t pp_nv12_load_save_nv12_gen5[][4] = {
53 #include "shaders/post_processing/nv12_load_save_nv12.g4b.gen5"
/* NV12 -> 3-plane (I420/YV12) conversion kernel */
56 static const uint32_t pp_nv12_load_save_pl3_gen5[][4] = {
57 #include "shaders/post_processing/nv12_load_save_pl3.g4b.gen5"
/* 3-plane -> NV12 conversion kernel */
60 static const uint32_t pp_pl3_load_save_nv12_gen5[][4] = {
61 #include "shaders/post_processing/pl3_load_save_nv12.g4b.gen5"
/* 3-plane -> 3-plane copy kernel */
64 static const uint32_t pp_pl3_load_save_pl3_gen5[][4] = {
65 #include "shaders/post_processing/pl3_load_save_pl3.g4b.gen5"
/* NV12 bilinear scaling kernel */
68 static const uint32_t pp_nv12_scaling_gen5[][4] = {
69 #include "shaders/post_processing/nv12_scaling_nv12.g4b.gen5"
/* NV12 adaptive video scaler (8x8 sampler) kernel */
72 static const uint32_t pp_nv12_avs_gen5[][4] = {
73 #include "shaders/post_processing/nv12_avs_nv12.g4b.gen5"
/* NV12 denoise/deinterlace kernel */
76 static const uint32_t pp_nv12_dndi_gen5[][4] = {
77 #include "shaders/post_processing/nv12_dndi_nv12.g4b.gen5"
/* Forward declarations of the per-kernel initialize hooks referenced by the
 * pp_modules_gen5/gen6 tables below.  Each hook binds surfaces and fills the
 * static/inline GRF parameters for its kernel.
 * NOTE(review): the final parameter line of each prototype (presumably a
 * filter-argument pointer plus the closing ");") is elided in this excerpt. */
80 static void pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
81 const struct i965_surface *src_surface,
82 const VARectangle *src_rect,
83 const struct i965_surface *dst_surface,
84 const VARectangle *dst_rect,
86 static void pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
87 const struct i965_surface *src_surface,
88 const VARectangle *src_rect,
89 const struct i965_surface *dst_surface,
90 const VARectangle *dst_rect,
92 static void pp_nv12_scaling_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
93 const struct i965_surface *src_surface,
94 const VARectangle *src_rect,
95 const struct i965_surface *dst_surface,
96 const VARectangle *dst_rect,
98 static void pp_plx_load_save_plx_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
99 const struct i965_surface *src_surface,
100 const VARectangle *src_rect,
101 const struct i965_surface *dst_surface,
102 const VARectangle *dst_rect,
104 static void pp_nv12_dndi_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
105 const struct i965_surface *src_surface,
106 const VARectangle *src_rect,
107 const struct i965_surface *dst_surface,
108 const VARectangle *dst_rect,
/* Gen5 kernel-module table: one entry per pp kernel (name, id, binary,
 * binary size, initialize hook).  Indexed by the PP_* enum.
 * NOTE(review): most initializer braces/fields are elided in this excerpt. */
111 static struct pp_module pp_modules_gen5[] = {
114 "NULL module (for testing)",
117 sizeof(pp_null_gen5),
127 PP_NV12_LOAD_SAVE_N12,
128 pp_nv12_load_save_nv12_gen5,
129 sizeof(pp_nv12_load_save_nv12_gen5),
133 pp_plx_load_save_plx_initialize,
139 PP_NV12_LOAD_SAVE_PL3,
140 pp_nv12_load_save_pl3_gen5,
141 sizeof(pp_nv12_load_save_pl3_gen5),
145 pp_plx_load_save_plx_initialize,
151 PP_PL3_LOAD_SAVE_N12,
152 pp_pl3_load_save_nv12_gen5,
153 sizeof(pp_pl3_load_save_nv12_gen5),
157 pp_plx_load_save_plx_initialize,
/* NOTE(review): this PL3->PL3 entry reuses the PP_PL3_LOAD_SAVE_N12 id even
 * though its kernel is pp_pl3_load_save_pl3_gen5 — looks like a copy/paste
 * slip; confirm against the PP_* enum (expected a PL3_LOAD_SAVE_PL3 id). */
163 PP_PL3_LOAD_SAVE_N12,
164 pp_pl3_load_save_pl3_gen5,
165 sizeof(pp_pl3_load_save_pl3_gen5),
169 pp_plx_load_save_plx_initialize
174 "NV12 Scaling module",
176 pp_nv12_scaling_gen5,
177 sizeof(pp_nv12_scaling_gen5),
181 pp_nv12_scaling_initialize,
189 sizeof(pp_nv12_avs_gen5),
193 pp_nv12_avs_initialize,
201 sizeof(pp_nv12_dndi_gen5),
205 pp_nv12_dndi_initialize,
/* Gen6 (Sandybridge) post-processing kernel binaries — same set as the Gen5
 * tables above, assembled as .g6b dumps.
 * NOTE(review): the closing "};" of each table appears elided in this excerpt. */
209 static const uint32_t pp_null_gen6[][4] = {
210 #include "shaders/post_processing/null.g6b"
213 static const uint32_t pp_nv12_load_save_nv12_gen6[][4] = {
214 #include "shaders/post_processing/nv12_load_save_nv12.g6b"
217 static const uint32_t pp_nv12_load_save_pl3_gen6[][4] = {
218 #include "shaders/post_processing/nv12_load_save_pl3.g6b"
221 static const uint32_t pp_pl3_load_save_nv12_gen6[][4] = {
222 #include "shaders/post_processing/pl3_load_save_nv12.g6b"
225 static const uint32_t pp_pl3_load_save_pl3_gen6[][4] = {
226 #include "shaders/post_processing/pl3_load_save_pl3.g6b"
229 static const uint32_t pp_nv12_scaling_gen6[][4] = {
230 #include "shaders/post_processing/nv12_scaling_nv12.g6b"
233 static const uint32_t pp_nv12_avs_gen6[][4] = {
234 #include "shaders/post_processing/nv12_avs_nv12.g6b"
237 static const uint32_t pp_nv12_dndi_gen6[][4] = {
238 #include "shaders/post_processing/nv12_dndi_nv12.g6b"
/* Gen6 kernel-module table, mirroring pp_modules_gen5 above.
 * NOTE(review): most initializer braces/fields are elided in this excerpt. */
241 static struct pp_module pp_modules_gen6[] = {
244 "NULL module (for testing)",
247 sizeof(pp_null_gen6),
257 PP_NV12_LOAD_SAVE_N12,
258 pp_nv12_load_save_nv12_gen6,
259 sizeof(pp_nv12_load_save_nv12_gen6),
263 pp_plx_load_save_plx_initialize,
269 PP_NV12_LOAD_SAVE_PL3,
270 pp_nv12_load_save_pl3_gen6,
271 sizeof(pp_nv12_load_save_pl3_gen6),
275 pp_plx_load_save_plx_initialize,
281 PP_PL3_LOAD_SAVE_N12,
282 pp_pl3_load_save_nv12_gen6,
283 sizeof(pp_pl3_load_save_nv12_gen6),
287 pp_plx_load_save_plx_initialize,
/* NOTE(review): same apparent copy/paste slip as the Gen5 table — the
 * PL3->PL3 entry reuses the PP_PL3_LOAD_SAVE_N12 id; confirm against the
 * PP_* enum. */
293 PP_PL3_LOAD_SAVE_N12,
294 pp_pl3_load_save_pl3_gen6,
295 sizeof(pp_pl3_load_save_pl3_gen6),
299 pp_plx_load_save_plx_initialize,
304 "NV12 Scaling module",
306 pp_nv12_scaling_gen6,
307 sizeof(pp_nv12_scaling_gen6),
311 pp_nv12_scaling_initialize,
319 sizeof(pp_nv12_avs_gen6),
323 pp_nv12_avs_initialize,
331 sizeof(pp_nv12_dndi_gen6),
335 pp_nv12_dndi_initialize,
/* Shorthand aliases: the functions below assume a local `pp_context` and use
 * these to reach its static (CURBE) and inline (MEDIA_OBJECT) parameter blocks. */
339 #define pp_static_parameter pp_context->pp_static_parameter
340 #define pp_inline_parameter pp_context->pp_inline_parameter
/* Fetch the fourcc of an i965_surface, which wraps either a VAImage
 * (I965_SURFACE_IMAGE) or a VASurface.
 * NOTE(review): braces, the `else`, and the return statement are elided in
 * this excerpt; presumably returns `fourcc`. */
343 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
345 struct i965_driver_data *i965 = i965_driver_data(ctx);
348 if (surface->flag == I965_SURFACE_IMAGE) {
349 struct object_image *obj_image = IMAGE(surface->id);
350 fourcc = obj_image->image.format.fourcc;
352 struct object_surface *obj_surface = SURFACE(surface->id);
353 fourcc = obj_surface->fourcc;
/* Translate an I915_TILING_* value into SURFACE_STATE tiling bits (ss3).
 * NOTE(review): the switch/case/break scaffolding for the X/Y tiling cases
 * is elided in this excerpt. */
360 pp_set_surface_tiling(struct i965_surface_state *ss, unsigned int tiling)
363 case I915_TILING_NONE:
364 ss->ss3.tiled_surface = 0;
365 ss->ss3.tile_walk = 0;
368 ss->ss3.tiled_surface = 1;
369 ss->ss3.tile_walk = I965_TILEWALK_XMAJOR;
372 ss->ss3.tiled_surface = 1;
373 ss->ss3.tile_walk = I965_TILEWALK_YMAJOR;
/* Same translation for the SURFACE_STATE2 (media sampler-8x8) layout, where
 * the tiling bits live in ss2. */
379 pp_set_surface2_tiling(struct i965_surface_state2 *ss, unsigned int tiling)
382 case I915_TILING_NONE:
383 ss->ss2.tiled_surface = 0;
384 ss->ss2.tile_walk = 0;
387 ss->ss2.tiled_surface = 1;
388 ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
391 ss->ss2.tiled_surface = 1;
392 ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
/* NOTE(review): body elided in this excerpt — only the signature is visible;
 * surfaces are actually programmed via i965_pp_set_surface_state() below. */
398 ironlake_pp_surface_state(struct i965_post_processing_context *pp_context)
/* Fill one interface descriptor for the current pp kernel: kernel start
 * pointer, CURBE read length (grf 1-4), sampler-state and binding-table
 * pointers, each followed by a relocation so the GPU offsets are patched at
 * exec time.  NOTE(review): the dri_bo_map/unmap bracketing and the `desc`
 * assignment appear elided in this excerpt. */
404 ironlake_pp_interface_descriptor_table(struct i965_post_processing_context *pp_context)
406 struct i965_interface_descriptor *desc;
408 int pp_index = pp_context->current_pp;
410 bo = pp_context->idrt.bo;
414 memset(desc, 0, sizeof(*desc));
415 desc->desc0.grf_reg_blocks = 10;
416 desc->desc0.kernel_start_pointer = pp_context->pp_modules[pp_index].kernel.bo->offset >> 6; /* reloc */
417 desc->desc1.const_urb_entry_read_offset = 0;
418 desc->desc1.const_urb_entry_read_len = 4; /* grf 1-4 */
419 desc->desc2.sampler_state_pointer = pp_context->sampler_state_table.bo->offset >> 5;
420 desc->desc2.sampler_count = 0;
421 desc->desc3.binding_table_entry_count = 0;
422 desc->desc3.binding_table_pointer =
423 pp_context->binding_table.bo->offset >> 5; /*reloc */
/* Relocation: kernel start pointer (delta carries the low desc0 bits). */
425 dri_bo_emit_reloc(bo,
426 I915_GEM_DOMAIN_INSTRUCTION, 0,
427 desc->desc0.grf_reg_blocks,
428 offsetof(struct i965_interface_descriptor, desc0),
429 pp_context->pp_modules[pp_index].kernel.bo);
/* Relocation: sampler state table pointer. */
431 dri_bo_emit_reloc(bo,
432 I915_GEM_DOMAIN_INSTRUCTION, 0,
433 desc->desc2.sampler_count << 2,
434 offsetof(struct i965_interface_descriptor, desc2),
435 pp_context->sampler_state_table.bo);
/* Relocation: binding table pointer. */
437 dri_bo_emit_reloc(bo,
438 I915_GEM_DOMAIN_INSTRUCTION, 0,
439 desc->desc3.binding_table_entry_count,
440 offsetof(struct i965_interface_descriptor, desc3),
441 pp_context->binding_table.bo);
444 pp_context->idrt.num_interface_descriptors++;
/* Write the binding table: one surface-state offset per bound pp surface,
 * each with a relocation into the surface-state BO.
 * NOTE(review): the dri_bo_map/unmap calls and loop closing braces appear
 * elided in this excerpt. */
448 ironlake_pp_binding_table(struct i965_post_processing_context *pp_context)
450 unsigned int *binding_table;
451 dri_bo *bo = pp_context->binding_table.bo;
456 binding_table = bo->virtual;
457 memset(binding_table, 0, bo->size);
459 for (i = 0; i < MAX_PP_SURFACES; i++) {
460 if (pp_context->surfaces[i].ss_bo) {
461 assert(pp_context->surfaces[i].s_bo);
463 binding_table[i] = pp_context->surfaces[i].ss_bo->offset;
464 dri_bo_emit_reloc(bo,
465 I915_GEM_DOMAIN_INSTRUCTION, 0,
467 i * sizeof(*binding_table),
468 pp_context->surfaces[i].ss_bo);
/* Program VFE_STATE for generic media mode: thread count and URB entry
 * allocation from pp_context->urb, plus a relocated pointer to the interface
 * descriptor table.  NOTE(review): dri_bo_map/unmap bracketing appears
 * elided in this excerpt. */
477 ironlake_pp_vfe_state(struct i965_post_processing_context *pp_context)
479 struct i965_vfe_state *vfe_state;
482 bo = pp_context->vfe_state.bo;
485 vfe_state = bo->virtual;
486 memset(vfe_state, 0, sizeof(*vfe_state));
487 vfe_state->vfe1.max_threads = pp_context->urb.num_vfe_entries - 1;
488 vfe_state->vfe1.urb_entry_alloc_size = pp_context->urb.size_vfe_entry - 1;
489 vfe_state->vfe1.num_urb_entries = pp_context->urb.num_vfe_entries;
490 vfe_state->vfe1.vfe_mode = VFE_GENERIC_MODE;
491 vfe_state->vfe1.children_present = 0;
492 vfe_state->vfe2.interface_descriptor_base =
493 pp_context->idrt.bo->offset >> 4; /* reloc */
494 dri_bo_emit_reloc(bo,
495 I915_GEM_DOMAIN_INSTRUCTION, 0,
497 offsetof(struct i965_vfe_state, vfe2),
498 pp_context->idrt.bo);
/* Copy the 128-byte static (CURBE) parameter block into the constant-buffer
 * BO, from which CMD_CONSTANT_BUFFER feeds grf 1-4. */
503 ironlake_pp_upload_constants(struct i965_post_processing_context *pp_context)
505 unsigned char *constant_buffer;
/* The interface descriptor reads exactly 4 GRF registers (4 * 32 bytes). */
507 assert(sizeof(pp_static_parameter) == 128);
508 dri_bo_map(pp_context->curbe.bo, 1);
509 assert(pp_context->curbe.bo->virtual);
510 constant_buffer = pp_context->curbe.bo->virtual;
511 memcpy(constant_buffer, &pp_static_parameter, sizeof(pp_static_parameter));
512 dri_bo_unmap(pp_context->curbe.bo);
/* Build every indirect-state object the Ironlake pp pipeline needs, in
 * dependency order (surfaces -> binding table -> descriptors -> VFE -> CURBE). */
516 ironlake_pp_states_setup(VADriverContextP ctx,
517 struct i965_post_processing_context *pp_context)
519 ironlake_pp_surface_state(pp_context);
520 ironlake_pp_binding_table(pp_context);
521 ironlake_pp_interface_descriptor_table(pp_context);
522 ironlake_pp_vfe_state(pp_context);
523 ironlake_pp_upload_constants(pp_context);
/* Select the media pipeline before emitting media state/commands. */
527 ironlake_pp_pipeline_select(VADriverContextP ctx,
528 struct i965_post_processing_context *pp_context)
530 struct intel_batchbuffer *batch = pp_context->batch;
532 BEGIN_BATCH(batch, 1);
533 OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
534 ADVANCE_BATCH(batch);
/* Emit URB_FENCE partitioning the URB between VFE and CS sections.
 * NOTE(review): one OUT_BATCH (the first fence dword) appears elided between
 * the command and the VFE/CS fence dword in this excerpt. */
538 ironlake_pp_urb_layout(VADriverContextP ctx,
539 struct i965_post_processing_context *pp_context)
541 struct intel_batchbuffer *batch = pp_context->batch;
542 unsigned int vfe_fence, cs_fence;
/* VFE section ends where the CS section starts; CS runs to the URB end. */
544 vfe_fence = pp_context->urb.cs_start;
545 cs_fence = pp_context->urb.size;
547 BEGIN_BATCH(batch, 3);
548 OUT_BATCH(batch, CMD_URB_FENCE | UF0_VFE_REALLOC | UF0_CS_REALLOC | 1);
551 (vfe_fence << UF2_VFE_FENCE_SHIFT) | /* VFE_SIZE */
552 (cs_fence << UF2_CS_FENCE_SHIFT)); /* CS_SIZE */
553 ADVANCE_BATCH(batch);
/* Emit STATE_BASE_ADDRESS with every base set to zero (modify bit set), so
 * all indirect-state pointers are absolute graphics addresses. */
557 ironlake_pp_state_base_address(VADriverContextP ctx,
558 struct i965_post_processing_context *pp_context)
560 struct intel_batchbuffer *batch = pp_context->batch;
562 BEGIN_BATCH(batch, 8);
563 OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | 6);
564 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
565 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
566 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
567 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
568 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
569 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
570 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
571 ADVANCE_BATCH(batch);
/* Point the media pipeline at the VFE state object.
 * NOTE(review): one dword (orig line 582, presumably a zero/reserved
 * pointer) appears elided in this excerpt. */
575 ironlake_pp_state_pointers(VADriverContextP ctx,
576 struct i965_post_processing_context *pp_context)
578 struct intel_batchbuffer *batch = pp_context->batch;
580 BEGIN_BATCH(batch, 3);
581 OUT_BATCH(batch, CMD_MEDIA_STATE_POINTERS | 1);
583 OUT_RELOC(batch, pp_context->vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
584 ADVANCE_BATCH(batch);
/* Emit CS_URB_STATE describing the constant-buffer URB entries. */
588 ironlake_pp_cs_urb_layout(VADriverContextP ctx,
589 struct i965_post_processing_context *pp_context)
591 struct intel_batchbuffer *batch = pp_context->batch;
593 BEGIN_BATCH(batch, 2);
594 OUT_BATCH(batch, CMD_CS_URB_STATE | 0);
596 ((pp_context->urb.size_cs_entry - 1) << 4) | /* URB Entry Allocation Size */
597 (pp_context->urb.num_cs_entries << 0)); /* Number of URB Entries */
598 ADVANCE_BATCH(batch);
/* Emit CMD_CONSTANT_BUFFER pointing at the CURBE BO; the reloc delta encodes
 * the buffer length (size_cs_entry - 1) in the low bits. */
602 ironlake_pp_constant_buffer(VADriverContextP ctx,
603 struct i965_post_processing_context *pp_context)
605 struct intel_batchbuffer *batch = pp_context->batch;
607 BEGIN_BATCH(batch, 2);
608 OUT_BATCH(batch, CMD_CONSTANT_BUFFER | (1 << 8) | (2 - 2));
609 OUT_RELOC(batch, pp_context->curbe.bo,
610 I915_GEM_DOMAIN_INSTRUCTION, 0,
611 pp_context->urb.size_cs_entry - 1);
612 ADVANCE_BATCH(batch);
/* Walk the destination in blocks: for each (x, y) step the per-kernel
 * pp_set_block_parameter hook updates the inline parameters, then a
 * MEDIA_OBJECT command carrying those 64 bytes (grf 5-6) is emitted.
 * A non-zero return from the hook skips the block.
 * NOTE(review): several OUT_BATCH dwords and closing braces appear elided
 * in this excerpt. */
616 ironlake_pp_object_walker(VADriverContextP ctx,
617 struct i965_post_processing_context *pp_context)
619 struct intel_batchbuffer *batch = pp_context->batch;
620 int x, x_steps, y, y_steps;
622 x_steps = pp_context->pp_x_steps(&pp_context->private_context);
623 y_steps = pp_context->pp_y_steps(&pp_context->private_context);
625 for (y = 0; y < y_steps; y++) {
626 for (x = 0; x < x_steps; x++) {
627 if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
628 BEGIN_BATCH(batch, 20);
629 OUT_BATCH(batch, CMD_MEDIA_OBJECT | 18);
631 OUT_BATCH(batch, 0); /* no indirect data */
634 /* inline data grf 5-6 */
635 assert(sizeof(pp_inline_parameter) == 64);
636 intel_batchbuffer_data(batch, &pp_inline_parameter, sizeof(pp_inline_parameter));
638 ADVANCE_BATCH(batch);
/* Emit the full Ironlake pp command sequence into the batch, atomically:
 * flush, pipeline select, state setup commands, then the object walker. */
645 ironlake_pp_pipeline_setup(VADriverContextP ctx,
646 struct i965_post_processing_context *pp_context)
648 struct intel_batchbuffer *batch = pp_context->batch;
650 intel_batchbuffer_start_atomic(batch, 0x1000);
651 intel_batchbuffer_emit_mi_flush(batch);
652 ironlake_pp_pipeline_select(ctx, pp_context);
653 ironlake_pp_state_base_address(ctx, pp_context);
654 ironlake_pp_state_pointers(ctx, pp_context);
655 ironlake_pp_urb_layout(ctx, pp_context);
656 ironlake_pp_cs_urb_layout(ctx, pp_context);
657 ironlake_pp_constant_buffer(ctx, pp_context);
658 ironlake_pp_object_walker(ctx, pp_context);
659 intel_batchbuffer_end_atomic(batch);
/* Allocate and fill a SURFACE_STATE for surf_bo (at surf_bo_offset) and
 * register it in pp_context->surfaces[index].  is_target selects the render
 * write domain for the relocation.
 * NOTE(review): the `ss = ss_bo->virtual` assignment, the unmap, and
 * trailing lines appear elided in this excerpt. */
663 i965_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
664 dri_bo *surf_bo, unsigned long surf_bo_offset,
665 int width, int height, int pitch, int format,
666 int index, int is_target)
668 struct i965_driver_data *i965 = i965_driver_data(ctx);
669 struct i965_surface_state *ss;
672 unsigned int swizzle;
674 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
/* Keep a reference on the data BO for the lifetime of this binding slot. */
675 pp_context->surfaces[index].s_bo = surf_bo;
676 dri_bo_reference(pp_context->surfaces[index].s_bo);
677 ss_bo = dri_bo_alloc(i965->intel.bufmgr,
679 sizeof(struct i965_surface_state),
682 pp_context->surfaces[index].ss_bo = ss_bo;
683 dri_bo_map(ss_bo, True);
684 assert(ss_bo->virtual);
686 memset(ss, 0, sizeof(*ss));
687 ss->ss0.surface_type = I965_SURFACE_2D;
688 ss->ss0.surface_format = format;
689 ss->ss1.base_addr = surf_bo->offset + surf_bo_offset;
690 ss->ss2.width = width - 1;
691 ss->ss2.height = height - 1;
692 ss->ss3.pitch = pitch - 1;
693 pp_set_surface_tiling(ss, tiling);
694 dri_bo_emit_reloc(ss_bo,
695 I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
697 offsetof(struct i965_surface_state, ss1),
698 pp_context->surfaces[index].s_bo);
/* Allocate and fill a SURFACE_STATE2 (media sampler-8x8 layout, used by the
 * AVS kernel) for surf_bo and register it in pp_context->surfaces[index].
 * xoffset/yoffset locate the Cb/Cr data; interleave_chroma marks NV12-style
 * interleaved UV.  NOTE(review): several lines (reloc delta, closing lines)
 * appear elided in this excerpt. */
703 i965_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
704 dri_bo *surf_bo, unsigned long surf_bo_offset,
705 int width, int height, int wpitch,
706 int xoffset, int yoffset,
707 int format, int interleave_chroma,
710 struct i965_driver_data *i965 = i965_driver_data(ctx);
711 struct i965_surface_state2 *ss2;
714 unsigned int swizzle;
716 dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
/* Keep a reference on the data BO for the lifetime of this binding slot. */
717 pp_context->surfaces[index].s_bo = surf_bo;
718 dri_bo_reference(pp_context->surfaces[index].s_bo);
719 ss2_bo = dri_bo_alloc(i965->intel.bufmgr,
721 sizeof(struct i965_surface_state2),
724 pp_context->surfaces[index].ss_bo = ss2_bo;
725 dri_bo_map(ss2_bo, True);
726 assert(ss2_bo->virtual);
727 ss2 = ss2_bo->virtual;
728 memset(ss2, 0, sizeof(*ss2));
729 ss2->ss0.surface_base_address = surf_bo->offset + surf_bo_offset;
730 ss2->ss1.cbcr_pixel_offset_v_direction = 0;
731 ss2->ss1.width = width - 1;
732 ss2->ss1.height = height - 1;
733 ss2->ss2.pitch = wpitch - 1;
734 ss2->ss2.interleave_chroma = interleave_chroma;
735 ss2->ss2.surface_format = format;
736 ss2->ss3.x_offset_for_cb = xoffset;
737 ss2->ss3.y_offset_for_cb = yoffset;
738 pp_set_surface2_tiling(ss2, tiling);
739 dri_bo_emit_reloc(ss2_bo,
740 I915_GEM_DOMAIN_RENDER, 0,
742 offsetof(struct i965_surface_state2, ss0),
744 dri_bo_unmap(ss2_bo);
/* Bind the Y/U/V (or Y/UV for NV12) planes of `surface` as media read/write
 * surfaces starting at binding slot base_index, and return each plane's
 * width/height/pitch/offset through the out arrays.  Handles both
 * VASurface- and VAImage-backed sources; YV12 swaps the U/V plane indices.
 * NOTE(review): several `} else {` lines, closing braces, and some call
 * arguments (bo, offsets) appear elided in this excerpt. */
748 pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
749 const struct i965_surface *surface,
750 int base_index, int is_target,
751 int *width, int *height, int *pitch, int *offset)
753 struct i965_driver_data *i965 = i965_driver_data(ctx);
754 struct object_surface *obj_surface;
755 struct object_image *obj_image;
757 int fourcc = pp_get_surface_fourcc(ctx, surface);
/* YV12 stores V before U, so swap the plane indices for it. */
759 const int U = fourcc == VA_FOURCC('Y', 'V', '1', '2') ? 2 : 1;
760 const int V = fourcc == VA_FOURCC('Y', 'V', '1', '2') ? 1 : 2;
762 int interleaved_uv = fourcc == VA_FOURCC('N', 'V', '1', '2');
764 if (surface->flag == I965_SURFACE_SURFACE) {
765 obj_surface = SURFACE(surface->id);
766 bo = obj_surface->bo;
767 width[0] = obj_surface->orig_width;
768 height[0] = obj_surface->orig_height;
769 pitch[0] = obj_surface->width;
/* NV12: one half-height UV plane directly after Y. */
772 if (interleaved_uv) {
773 width[1] = obj_surface->orig_width;
774 height[1] = obj_surface->orig_height / 2;
775 pitch[1] = obj_surface->width;
776 offset[1] = offset[0] + obj_surface->width * obj_surface->height;
/* Planar: two quarter-size chroma planes after Y. */
778 width[1] = obj_surface->orig_width / 2;
779 height[1] = obj_surface->orig_height / 2;
780 pitch[1] = obj_surface->width / 2;
781 offset[1] = offset[0] + obj_surface->width * obj_surface->height;
782 width[2] = obj_surface->orig_width / 2;
783 height[2] = obj_surface->orig_height / 2;
784 pitch[2] = obj_surface->width / 2;
785 offset[2] = offset[1] + (obj_surface->width / 2) * (obj_surface->height / 2);
/* VAImage-backed source: take geometry from the image descriptor. */
788 obj_image = IMAGE(surface->id);
790 width[0] = obj_image->image.width;
791 height[0] = obj_image->image.height;
792 pitch[0] = obj_image->image.pitches[0];
793 offset[0] = obj_image->image.offsets[0];
795 if (interleaved_uv) {
796 width[1] = obj_image->image.width;
797 height[1] = obj_image->image.height / 2;
798 pitch[1] = obj_image->image.pitches[1];
799 offset[1] = obj_image->image.offsets[1];
801 width[1] = obj_image->image.width / 2;
802 height[1] = obj_image->image.height / 2;
803 pitch[1] = obj_image->image.pitches[1];
804 offset[1] = obj_image->image.offsets[1];
805 width[2] = obj_image->image.width / 2;
806 height[2] = obj_image->image.height / 2;
807 pitch[2] = obj_image->image.pitches[2];
808 offset[2] = obj_image->image.offsets[2];
/* Y plane at base_index; widths are /4 because the media kernels address
 * four bytes per element with R8_UNORM. */
813 i965_pp_set_surface_state(ctx, pp_context,
815 width[Y] / 4, height[Y], pitch[Y], I965_SURFACEFORMAT_R8_UNORM,
816 base_index, is_target);
818 if (interleaved_uv) {
819 i965_pp_set_surface_state(ctx, pp_context,
821 width[UV] / 4, height[UV], pitch[UV], I965_SURFACEFORMAT_R8_UNORM,
822 base_index + 1, is_target);
/* Planar path: U then V at base_index + 1 / + 2. */
825 i965_pp_set_surface_state(ctx, pp_context,
827 width[U] / 4, height[U], pitch[U], I965_SURFACEFORMAT_R8_UNORM,
828 base_index + 1, is_target);
831 i965_pp_set_surface_state(ctx, pp_context,
833 width[V] / 4, height[V], pitch[V], I965_SURFACEFORMAT_R8_UNORM,
834 base_index + 2, is_target);
/* NULL kernel hooks: walker callbacks for the do-nothing test module.
 * NOTE(review): the return statements (presumably returning 1/1/0) and
 * braces are elided in this excerpt. */
840 pp_null_x_steps(void *private_context)
846 pp_null_y_steps(void *private_context)
852 pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
/* Initialize hook for the NULL module: only installs the walker callbacks;
 * no surfaces or parameters are set up. */
858 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
859 const struct i965_surface *src_surface,
860 const VARectangle *src_rect,
861 const struct i965_surface *dst_surface,
862 const VARectangle *dst_rect,
865 /* private function & data */
866 pp_context->pp_x_steps = pp_null_x_steps;
867 pp_context->pp_y_steps = pp_null_y_steps;
868 pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
/* Load/save (format conversion / copy) module: the walker advances in
 * 16x8-pixel blocks; y steps come from the aligned destination height.
 * NOTE(review): return statements and braces are elided in this excerpt;
 * pp_load_save_x_steps presumably returns 1 (1 x N walking). */
872 pp_load_save_x_steps(void *private_context)
878 pp_load_save_y_steps(void *private_context)
880 struct pp_load_save_context *pp_load_save_context = private_context;
882 return pp_load_save_context->dest_h / 8;
/* Per-block hook: full masks, block origin at (x*16, y*8). */
886 pp_load_save_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
888 pp_inline_parameter.grf5.block_vertical_mask = 0xff;
889 pp_inline_parameter.grf5.block_horizontal_mask = 0xffff;
890 pp_inline_parameter.grf5.destination_block_horizontal_origin = x * 16;
891 pp_inline_parameter.grf5.destination_block_vertical_origin = y * 8;
/* Initialize hook shared by all plX->plX load/save kernels: binds source
 * planes at slots 1.. and destination planes at slots 7.., then programs
 * the walker for a 1 x N block layout over the aligned destination. */
897 pp_plx_load_save_plx_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
898 const struct i965_surface *src_surface,
899 const VARectangle *src_rect,
900 const struct i965_surface *dst_surface,
901 const VARectangle *dst_rect,
904 struct pp_load_save_context *pp_load_save_context = (struct pp_load_save_context *)&pp_context->private_context;
905 int width[3], height[3], pitch[3], offset[3];
909 pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 1, 0,
910 width, height, pitch, offset);
912 /* destination surface */
913 pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 7, 1,
914 width, height, pitch, offset);
916 /* private function & data */
917 pp_context->pp_x_steps = pp_load_save_x_steps;
918 pp_context->pp_y_steps = pp_load_save_y_steps;
919 pp_context->pp_set_block_parameter = pp_load_save_set_block_parameter;
920 pp_load_save_context->dest_h = ALIGN(height[Y], 16);
921 pp_load_save_context->dest_w = ALIGN(width[Y], 16);
923 pp_inline_parameter.grf5.block_count_x = ALIGN(width[Y], 16) / 16; /* 1 x N */
924 pp_inline_parameter.grf5.number_blocks = ALIGN(width[Y], 16) / 16;
/* Bilinear scaling module walker hooks.
 * NOTE(review): return statements and braces are elided in this excerpt;
 * pp_scaling_x_steps presumably returns 1 (1 x N walking). */
928 pp_scaling_x_steps(void *private_context)
934 pp_scaling_y_steps(void *private_context)
936 struct pp_scaling_context *pp_scaling_context = private_context;
938 return pp_scaling_context->dest_h / 8;
/* Per-block hook: advance the normalized source origin by the per-pixel
 * scaling steps and place the destination block at the requested rect. */
942 pp_scaling_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
944 struct pp_scaling_context *pp_scaling_context = (struct pp_scaling_context *)&pp_context->private_context;
945 float src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
946 float src_y_steping = pp_static_parameter.grf1.r1_6.normalized_video_y_scaling_step;
948 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin = src_x_steping * x * 16 + pp_scaling_context->src_normalized_x;
949 pp_inline_parameter.grf5.source_surface_block_normalized_vertical_origin = src_y_steping * y * 8 + pp_scaling_context->src_normalized_y;
950 pp_inline_parameter.grf5.destination_block_horizontal_origin = x * 16 + pp_scaling_context->dest_x;
951 pp_inline_parameter.grf5.destination_block_vertical_origin = y * 8 + pp_scaling_context->dest_y;
/* Initialize hook for the NV12 bilinear scaling kernel: binds the source
 * Y/UV planes as sampled surfaces (slots 1-2) with linear-filter sampler
 * states, the destination Y/UV planes as render targets (slots 7-8), and
 * programs the normalized scaling steps from the src/dst rectangles.
 * NOTE(review): some call arguments (bo/offset lines), `is_target` flags,
 * and braces are elided in this excerpt.  The normalized x/y expressions
 * divide by both in_* and out_* — presumably matching the kernel's
 * sampling math; confirm against the shader source. */
957 pp_nv12_scaling_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
958 const struct i965_surface *src_surface,
959 const VARectangle *src_rect,
960 const struct i965_surface *dst_surface,
961 const VARectangle *dst_rect,
964 struct i965_driver_data *i965 = i965_driver_data(ctx);
965 struct pp_scaling_context *pp_scaling_context = (struct pp_scaling_context *)&pp_context->private_context;
966 struct object_surface *obj_surface;
967 struct i965_sampler_state *sampler_state;
968 int in_w, in_h, in_wpitch, in_hpitch;
969 int out_w, out_h, out_wpitch, out_hpitch;
972 obj_surface = SURFACE(src_surface->id);
973 in_w = obj_surface->orig_width;
974 in_h = obj_surface->orig_height;
975 in_wpitch = obj_surface->width;
976 in_hpitch = obj_surface->height;
978 /* source Y surface index 1 */
979 i965_pp_set_surface_state(ctx, pp_context,
981 in_w, in_h, in_wpitch, I965_SURFACEFORMAT_R8_UNORM,
984 /* source UV surface index 2 */
985 i965_pp_set_surface_state(ctx, pp_context,
986 obj_surface->bo, in_wpitch * in_hpitch,
987 in_w / 2, in_h / 2, in_wpitch, I965_SURFACEFORMAT_R8G8_UNORM,
990 /* destination surface */
991 obj_surface = SURFACE(dst_surface->id);
992 out_w = obj_surface->orig_width;
993 out_h = obj_surface->orig_height;
994 out_wpitch = obj_surface->width;
995 out_hpitch = obj_surface->height;
997 /* destination Y surface index 7 */
998 i965_pp_set_surface_state(ctx, pp_context,
1000 out_w / 4, out_h, out_wpitch, I965_SURFACEFORMAT_R8_UNORM,
1003 /* destination UV surface index 8 */
1004 i965_pp_set_surface_state(ctx, pp_context,
1005 obj_surface->bo, out_wpitch * out_hpitch,
1006 out_w / 4, out_h / 2, out_wpitch, I965_SURFACEFORMAT_R8G8_UNORM,
/* Linear-filtering, clamped sampler states for the Y and UV samplers. */
1010 dri_bo_map(pp_context->sampler_state_table.bo, True);
1011 assert(pp_context->sampler_state_table.bo->virtual);
1012 sampler_state = pp_context->sampler_state_table.bo->virtual;
1014 /* SIMD16 Y index 1 */
1015 sampler_state[1].ss0.min_filter = I965_MAPFILTER_LINEAR;
1016 sampler_state[1].ss0.mag_filter = I965_MAPFILTER_LINEAR;
1017 sampler_state[1].ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1018 sampler_state[1].ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1019 sampler_state[1].ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1021 /* SIMD16 UV index 2 */
1022 sampler_state[2].ss0.min_filter = I965_MAPFILTER_LINEAR;
1023 sampler_state[2].ss0.mag_filter = I965_MAPFILTER_LINEAR;
1024 sampler_state[2].ss1.r_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1025 sampler_state[2].ss1.s_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1026 sampler_state[2].ss1.t_wrap_mode = I965_TEXCOORDMODE_CLAMP;
1028 dri_bo_unmap(pp_context->sampler_state_table.bo);
1030 /* private function & data */
1031 pp_context->pp_x_steps = pp_scaling_x_steps;
1032 pp_context->pp_y_steps = pp_scaling_y_steps;
1033 pp_context->pp_set_block_parameter = pp_scaling_set_block_parameter;
1035 pp_scaling_context->dest_x = dst_rect->x;
1036 pp_scaling_context->dest_y = dst_rect->y;
1037 pp_scaling_context->dest_w = ALIGN(dst_rect->width, 16);
1038 pp_scaling_context->dest_h = ALIGN(dst_rect->height, 16);
1039 pp_scaling_context->src_normalized_x = (float)src_rect->x / in_w / out_w;
1040 pp_scaling_context->src_normalized_y = (float)src_rect->y / in_h / out_h;
1042 pp_static_parameter.grf1.r1_6.normalized_video_y_scaling_step = (float) src_rect->height / in_h / out_h;
1044 pp_inline_parameter.grf5.normalized_video_x_scaling_step = (float) src_rect->width / in_w / out_w;
1045 pp_inline_parameter.grf5.block_count_x = pp_scaling_context->dest_w / 16; /* 1 x N */
1046 pp_inline_parameter.grf5.number_blocks = pp_scaling_context->dest_w / 16;
1047 pp_inline_parameter.grf5.block_vertical_mask = 0xff;
1048 pp_inline_parameter.grf5.block_horizontal_mask = 0xffff;
/* AVS (adaptive video scaler) walker hooks.  The per-block hook implements
 * non-linear anamorphic scaling: when the aspect-preserving width (tmp_w)
 * is narrower than the destination, the left and right edge bands are scaled
 * with a linearly varying step (video_step_delta) while the center is scaled
 * linearly.  NOTE(review): many lines (returns, else branches, closing
 * braces, some assignments such as n0/nls_left) are elided in this excerpt,
 * so the band bookkeeping below cannot be fully verified from here. */
1052 pp_avs_x_steps(void *private_context)
1054 struct pp_avs_context *pp_avs_context = private_context;
1056 return pp_avs_context->dest_w / 16;
/* NOTE(review): body elided; presumably returns dest_h / 8 like the other
 * modules' y-step hooks — confirm. */
1060 pp_avs_y_steps(void *private_context)
1066 pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
1068 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->private_context;
1069 float src_x_steping, src_y_steping, video_step_delta;
/* Width the destination would have if the source aspect ratio were kept. */
1070 int tmp_w = ALIGN(pp_avs_context->dest_h * pp_avs_context->src_w / pp_avs_context->src_h, 16);
1072 if (tmp_w >= pp_avs_context->dest_w) {
/* Uniform (linear) horizontal scaling: constant step, no delta. */
1073 pp_inline_parameter.grf5.normalized_video_x_scaling_step = 1.0 / tmp_w;
1074 pp_inline_parameter.grf6.video_step_delta = 0;
1077 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin = (float)(tmp_w - pp_avs_context->dest_w) / tmp_w / 2 +
1078 pp_avs_context->src_normalized_x;
/* Subsequent blocks: advance origin by 16 steps plus accumulated delta. */
1080 src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
1081 video_step_delta = pp_inline_parameter.grf6.video_step_delta;
1082 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin += src_x_steping * 16 +
1083 16 * 15 * video_step_delta / 2;
/* Non-linear path: split the row into left band / center / right band. */
1086 int n0, n1, n2, nls_left, nls_right;
1087 int factor_a = 5, factor_b = 4;
1090 n0 = (pp_avs_context->dest_w - tmp_w) / (16 * 2);
1091 n1 = (pp_avs_context->dest_w - tmp_w) / 16 - n0;
1092 n2 = tmp_w / (16 * factor_a);
1094 nls_right = n1 + n2;
1095 f = (float) n2 * 16 / tmp_w;
1098 pp_inline_parameter.grf6.video_step_delta = 0.0;
/* x == 0 with no left band: plain linear start. */
1101 pp_inline_parameter.grf5.normalized_video_x_scaling_step = 1.0 / pp_avs_context->dest_w;
1102 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin = pp_avs_context->src_normalized_x;
1104 src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
1105 video_step_delta = pp_inline_parameter.grf6.video_step_delta;
1106 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin += src_x_steping * 16 +
1107 16 * 15 * video_step_delta / 2;
/* Left band: step grows linearly; solve a and b from the band coverage f. */
1111 /* f = a * nls_left * 16 + b * nls_left * 16 * (nls_left * 16 - 1) / 2 */
1112 float a = f / (nls_left * 16 * factor_b);
1113 float b = (f - nls_left * 16 * a) * 2 / (nls_left * 16 * (nls_left * 16 - 1));
1115 pp_inline_parameter.grf6.video_step_delta = b;
1118 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin = pp_avs_context->src_normalized_x;
1119 pp_inline_parameter.grf5.normalized_video_x_scaling_step = a;
1121 src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
1122 video_step_delta = pp_inline_parameter.grf6.video_step_delta;
1123 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin += src_x_steping * 16 +
1124 16 * 15 * video_step_delta / 2;
1125 pp_inline_parameter.grf5.normalized_video_x_scaling_step += 16 * b;
1127 } else if (x < (pp_avs_context->dest_w / 16 - nls_right)) {
1128 /* scale the center linearly */
1129 src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
1130 video_step_delta = pp_inline_parameter.grf6.video_step_delta;
1131 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin += src_x_steping * 16 +
1132 16 * 15 * video_step_delta / 2;
1133 pp_inline_parameter.grf6.video_step_delta = 0.0;
1134 pp_inline_parameter.grf5.normalized_video_x_scaling_step = 1.0 / tmp_w;
/* Right band: mirror of the left band with a negative delta. */
1136 float a = f / (nls_right * 16 * factor_b);
1137 float b = (f - nls_right * 16 * a) * 2 / (nls_right * 16 * (nls_right * 16 - 1));
1139 src_x_steping = pp_inline_parameter.grf5.normalized_video_x_scaling_step;
1140 video_step_delta = pp_inline_parameter.grf6.video_step_delta;
1141 pp_inline_parameter.grf5.r5_1.source_surface_block_normalized_horizontal_origin += src_x_steping * 16 +
1142 16 * 15 * video_step_delta / 2;
1143 pp_inline_parameter.grf6.video_step_delta = -b;
1145 if (x == (pp_avs_context->dest_w / 16 - nls_right))
1146 pp_inline_parameter.grf5.normalized_video_x_scaling_step = a + (nls_right * 16 - 1) * b;
1148 pp_inline_parameter.grf5.normalized_video_x_scaling_step -= b * 16;
/* Vertical origin and destination block placement (shared by all paths). */
1153 src_y_steping = pp_static_parameter.grf1.r1_6.normalized_video_y_scaling_step;
1154 pp_inline_parameter.grf5.source_surface_block_normalized_vertical_origin = src_y_steping * y * 8 + pp_avs_context->src_normalized_y;
1155 pp_inline_parameter.grf5.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
1156 pp_inline_parameter.grf5.destination_block_vertical_origin = y * 8 + pp_avs_context->dest_y;
/*
 * Set up the AVS (Adaptive Video Scaler) post-processing pass for an NV12
 * source surface: programs source/destination surface states, the 8x8 AVS/IEF
 * sampler states for the Y and UV channels, and the static/inline kernel
 * parameters used by the scaling shader.
 * NOTE(review): this chunk is lossily sampled — several original lines
 * (return type, some call arguments, closing brace) are not visible here.
 */
1162 pp_nv12_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
1163 const struct i965_surface *src_surface,
1164 const VARectangle *src_rect,
1165 const struct i965_surface *dst_surface,
1166 const VARectangle *dst_rect,
1169 struct i965_driver_data *i965 = i965_driver_data(ctx);
1170 struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->private_context;
1171 struct object_surface *obj_surface;
1172 struct i965_sampler_8x8 *sampler_8x8;
1173 struct i965_sampler_8x8_state *sampler_8x8_state;
1175 int in_w, in_h, in_wpitch, in_hpitch;
1176 int out_w, out_h, out_wpitch, out_hpitch;
/* Source surface: orig_* are the logical dimensions, width/height the pitches. */
1179 obj_surface = SURFACE(src_surface->id);
1180 in_w = obj_surface->orig_width;
1181 in_h = obj_surface->orig_height;
1182 in_wpitch = obj_surface->width;
1183 in_hpitch = obj_surface->height;
1185 /* source Y surface index 1 */
1186 i965_pp_set_surface2_state(ctx, pp_context,
1188 in_w, in_h, in_wpitch,
1190 SURFACE_FORMAT_Y8_UNORM, 0,
1193 /* source UV surface index 2 */
1194 i965_pp_set_surface2_state(ctx, pp_context,
1195 obj_surface->bo, in_wpitch * in_hpitch,
1196 in_w, in_h, in_wpitch,
1198 SURFACE_FORMAT_PLANAR_420_8, 1,
1201 /* destination surface */
1202 obj_surface = SURFACE(dst_surface->id);
1203 out_w = obj_surface->orig_width;
1204 out_h = obj_surface->orig_height;
1205 out_wpitch = obj_surface->width;
1206 out_hpitch = obj_surface->height;
1207 assert(out_w <= out_wpitch && out_h <= out_hpitch);
1209 /* destination Y surface index 7 */
1210 i965_pp_set_surface_state(ctx, pp_context,
/* width is divided by 4: the kernel writes 4 bytes per R8_UNORM element. */
1212 out_w / 4, out_h, out_wpitch, I965_SURFACEFORMAT_R8_UNORM,
1215 /* destination UV surface index 8 */
1216 i965_pp_set_surface_state(ctx, pp_context,
1217 obj_surface->bo, out_wpitch * out_hpitch,
1218 out_w / 4, out_h / 2, out_wpitch, I965_SURFACEFORMAT_R8G8_UNORM,
1221 /* sampler 8x8 state */
/* The sampler_8x8_state structure must be exactly 138 dwords per hardware spec. */
1222 dri_bo_map(pp_context->sampler_state_table.bo_8x8, True);
1223 assert(pp_context->sampler_state_table.bo_8x8->virtual);
1224 assert(sizeof(*sampler_8x8_state) == sizeof(int) * 138);
1225 sampler_8x8_state = pp_context->sampler_state_table.bo_8x8->virtual;
1226 memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
1227 sampler_8x8_state->dw136.default_sharpness_level = 0;
1228 sampler_8x8_state->dw137.adaptive_filter_for_all_channel = 1;
1229 sampler_8x8_state->dw137.bypass_y_adaptive_filtering = 1;
1230 sampler_8x8_state->dw137.bypass_x_adaptive_filtering = 1;
1231 dri_bo_unmap(pp_context->sampler_state_table.bo_8x8);
1234 dri_bo_map(pp_context->sampler_state_table.bo, True);
1235 assert(pp_context->sampler_state_table.bo->virtual);
1236 assert(sizeof(*sampler_8x8) == sizeof(int) * 16);
1237 sampler_8x8 = pp_context->sampler_state_table.bo->virtual;
1239 /* sample_8x8 Y index 1 */
/* Y channel: adaptive 8-tap AVS filter with the detail IEF filter enabled.
 * The long run of dw2..dw14 assignments below programs the IEF piecewise-
 * linear (PWL) curves and limiter values — magic tuning constants. */
1241 memset(&sampler_8x8[index], 0, sizeof(*sampler_8x8));
1242 sampler_8x8[index].dw0.avs_filter_type = AVS_FILTER_ADAPTIVE_8_TAP;
1243 sampler_8x8[index].dw0.ief_bypass = 0;
1244 sampler_8x8[index].dw0.ief_filter_type = IEF_FILTER_DETAIL;
1245 sampler_8x8[index].dw0.ief_filter_size = IEF_FILTER_SIZE_5X5;
1246 sampler_8x8[index].dw1.sampler_8x8_state_pointer = pp_context->sampler_state_table.bo_8x8->offset >> 5;
1247 sampler_8x8[index].dw2.global_noise_estimation = 22;
1248 sampler_8x8[index].dw2.strong_edge_threshold = 8;
1249 sampler_8x8[index].dw2.weak_edge_threshold = 1;
1250 sampler_8x8[index].dw3.strong_edge_weight = 7;
1251 sampler_8x8[index].dw3.regular_weight = 2;
1252 sampler_8x8[index].dw3.non_edge_weight = 0;
1253 sampler_8x8[index].dw3.gain_factor = 40;
1254 sampler_8x8[index].dw4.steepness_boost = 0;
1255 sampler_8x8[index].dw4.steepness_threshold = 0;
1256 sampler_8x8[index].dw4.mr_boost = 0;
1257 sampler_8x8[index].dw4.mr_threshold = 5;
1258 sampler_8x8[index].dw5.pwl1_point_1 = 4;
1259 sampler_8x8[index].dw5.pwl1_point_2 = 12;
1260 sampler_8x8[index].dw5.pwl1_point_3 = 16;
1261 sampler_8x8[index].dw5.pwl1_point_4 = 26;
1262 sampler_8x8[index].dw6.pwl1_point_5 = 40;
1263 sampler_8x8[index].dw6.pwl1_point_6 = 160;
1264 sampler_8x8[index].dw6.pwl1_r3_bias_0 = 127;
1265 sampler_8x8[index].dw6.pwl1_r3_bias_1 = 98;
1266 sampler_8x8[index].dw7.pwl1_r3_bias_2 = 88;
1267 sampler_8x8[index].dw7.pwl1_r3_bias_3 = 64;
1268 sampler_8x8[index].dw7.pwl1_r3_bias_4 = 44;
1269 sampler_8x8[index].dw7.pwl1_r3_bias_5 = 0;
1270 sampler_8x8[index].dw8.pwl1_r3_bias_6 = 0;
1271 sampler_8x8[index].dw8.pwl1_r5_bias_0 = 3;
1272 sampler_8x8[index].dw8.pwl1_r5_bias_1 = 32;
1273 sampler_8x8[index].dw8.pwl1_r5_bias_2 = 32;
1274 sampler_8x8[index].dw9.pwl1_r5_bias_3 = 58;
1275 sampler_8x8[index].dw9.pwl1_r5_bias_4 = 100;
1276 sampler_8x8[index].dw9.pwl1_r5_bias_5 = 108;
1277 sampler_8x8[index].dw9.pwl1_r5_bias_6 = 88;
1278 sampler_8x8[index].dw10.pwl1_r3_slope_0 = -116;
1279 sampler_8x8[index].dw10.pwl1_r3_slope_1 = -20;
1280 sampler_8x8[index].dw10.pwl1_r3_slope_2 = -96;
1281 sampler_8x8[index].dw10.pwl1_r3_slope_3 = -32;
1282 sampler_8x8[index].dw11.pwl1_r3_slope_4 = -50;
1283 sampler_8x8[index].dw11.pwl1_r3_slope_5 = 0;
1284 sampler_8x8[index].dw11.pwl1_r3_slope_6 = 0;
1285 sampler_8x8[index].dw11.pwl1_r5_slope_0 = 116;
1286 sampler_8x8[index].dw12.pwl1_r5_slope_1 = 0;
1287 sampler_8x8[index].dw12.pwl1_r5_slope_2 = 114;
1288 sampler_8x8[index].dw12.pwl1_r5_slope_3 = 67;
1289 sampler_8x8[index].dw12.pwl1_r5_slope_4 = 9;
1290 sampler_8x8[index].dw13.pwl1_r5_slope_5 = -3;
1291 sampler_8x8[index].dw13.pwl1_r5_slope_6 = -15;
1292 sampler_8x8[index].dw13.limiter_boost = 0;
1293 sampler_8x8[index].dw13.minimum_limiter = 10;
1294 sampler_8x8[index].dw13.maximum_limiter = 11;
1295 sampler_8x8[index].dw14.clip_limiter = 130;
/* Relocation so dw1's sampler_8x8_state_pointer tracks the 8x8 state BO. */
1296 dri_bo_emit_reloc(pp_context->sampler_state_table.bo,
1297 I915_GEM_DOMAIN_RENDER,
1300 sizeof(*sampler_8x8) * index + offsetof(struct i965_sampler_8x8, dw1),
1301 pp_context->sampler_state_table.bo_8x8);
/* Second 8x8 state BO, for the UV channel (adaptive filtering disabled). */
1303 dri_bo_map(pp_context->sampler_state_table.bo_8x8_uv, True);
1304 assert(pp_context->sampler_state_table.bo_8x8_uv->virtual);
1305 assert(sizeof(*sampler_8x8_state) == sizeof(int) * 138);
1306 sampler_8x8_state = pp_context->sampler_state_table.bo_8x8_uv->virtual;
1307 memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
1308 sampler_8x8_state->dw136.default_sharpness_level = 0;
1309 sampler_8x8_state->dw137.adaptive_filter_for_all_channel = 0;
1310 sampler_8x8_state->dw137.bypass_y_adaptive_filtering = 1;
1311 sampler_8x8_state->dw137.bypass_x_adaptive_filtering = 1;
1312 dri_bo_unmap(pp_context->sampler_state_table.bo_8x8_uv);
1314 /* sample_8x8 UV index 2 */
/* UV channel: nearest filtering instead of 8-tap; the IEF/PWL constants
 * below are otherwise identical to the Y channel's. */
1316 memset(&sampler_8x8[index], 0, sizeof(*sampler_8x8));
1317 sampler_8x8[index].dw0.avs_filter_type = AVS_FILTER_NEAREST;
1318 sampler_8x8[index].dw0.ief_bypass = 0;
1319 sampler_8x8[index].dw0.ief_filter_type = IEF_FILTER_DETAIL;
1320 sampler_8x8[index].dw0.ief_filter_size = IEF_FILTER_SIZE_5X5;
1321 sampler_8x8[index].dw1.sampler_8x8_state_pointer = pp_context->sampler_state_table.bo_8x8_uv->offset >> 5;
1322 sampler_8x8[index].dw2.global_noise_estimation = 22;
1323 sampler_8x8[index].dw2.strong_edge_threshold = 8;
1324 sampler_8x8[index].dw2.weak_edge_threshold = 1;
1325 sampler_8x8[index].dw3.strong_edge_weight = 7;
1326 sampler_8x8[index].dw3.regular_weight = 2;
1327 sampler_8x8[index].dw3.non_edge_weight = 0;
1328 sampler_8x8[index].dw3.gain_factor = 40;
1329 sampler_8x8[index].dw4.steepness_boost = 0;
1330 sampler_8x8[index].dw4.steepness_threshold = 0;
1331 sampler_8x8[index].dw4.mr_boost = 0;
1332 sampler_8x8[index].dw4.mr_threshold = 5;
1333 sampler_8x8[index].dw5.pwl1_point_1 = 4;
1334 sampler_8x8[index].dw5.pwl1_point_2 = 12;
1335 sampler_8x8[index].dw5.pwl1_point_3 = 16;
1336 sampler_8x8[index].dw5.pwl1_point_4 = 26;
1337 sampler_8x8[index].dw6.pwl1_point_5 = 40;
1338 sampler_8x8[index].dw6.pwl1_point_6 = 160;
1339 sampler_8x8[index].dw6.pwl1_r3_bias_0 = 127;
1340 sampler_8x8[index].dw6.pwl1_r3_bias_1 = 98;
1341 sampler_8x8[index].dw7.pwl1_r3_bias_2 = 88;
1342 sampler_8x8[index].dw7.pwl1_r3_bias_3 = 64;
1343 sampler_8x8[index].dw7.pwl1_r3_bias_4 = 44;
1344 sampler_8x8[index].dw7.pwl1_r3_bias_5 = 0;
1345 sampler_8x8[index].dw8.pwl1_r3_bias_6 = 0;
1346 sampler_8x8[index].dw8.pwl1_r5_bias_0 = 3;
1347 sampler_8x8[index].dw8.pwl1_r5_bias_1 = 32;
1348 sampler_8x8[index].dw8.pwl1_r5_bias_2 = 32;
1349 sampler_8x8[index].dw9.pwl1_r5_bias_3 = 58;
1350 sampler_8x8[index].dw9.pwl1_r5_bias_4 = 100;
1351 sampler_8x8[index].dw9.pwl1_r5_bias_5 = 108;
1352 sampler_8x8[index].dw9.pwl1_r5_bias_6 = 88;
1353 sampler_8x8[index].dw10.pwl1_r3_slope_0 = -116;
1354 sampler_8x8[index].dw10.pwl1_r3_slope_1 = -20;
1355 sampler_8x8[index].dw10.pwl1_r3_slope_2 = -96;
1356 sampler_8x8[index].dw10.pwl1_r3_slope_3 = -32;
1357 sampler_8x8[index].dw11.pwl1_r3_slope_4 = -50;
1358 sampler_8x8[index].dw11.pwl1_r3_slope_5 = 0;
1359 sampler_8x8[index].dw11.pwl1_r3_slope_6 = 0;
1360 sampler_8x8[index].dw11.pwl1_r5_slope_0 = 116;
1361 sampler_8x8[index].dw12.pwl1_r5_slope_1 = 0;
1362 sampler_8x8[index].dw12.pwl1_r5_slope_2 = 114;
1363 sampler_8x8[index].dw12.pwl1_r5_slope_3 = 67;
1364 sampler_8x8[index].dw12.pwl1_r5_slope_4 = 9;
1365 sampler_8x8[index].dw13.pwl1_r5_slope_5 = -3;
1366 sampler_8x8[index].dw13.pwl1_r5_slope_6 = -15;
1367 sampler_8x8[index].dw13.limiter_boost = 0;
1368 sampler_8x8[index].dw13.minimum_limiter = 10;
1369 sampler_8x8[index].dw13.maximum_limiter = 11;
1370 sampler_8x8[index].dw14.clip_limiter = 130;
1371 dri_bo_emit_reloc(pp_context->sampler_state_table.bo,
1372 I915_GEM_DOMAIN_RENDER,
1375 sizeof(*sampler_8x8) * index + offsetof(struct i965_sampler_8x8, dw1),
1376 pp_context->sampler_state_table.bo_8x8_uv);
1378 dri_bo_unmap(pp_context->sampler_state_table.bo);
1380 /* private function & data */
/* Hook the AVS walker callbacks and record rectangle/scaling parameters. */
1381 pp_context->pp_x_steps = pp_avs_x_steps;
1382 pp_context->pp_y_steps = pp_avs_y_steps;
1383 pp_context->pp_set_block_parameter = pp_avs_set_block_parameter;
/* Destination is walked in 16x8 blocks, hence the 16-alignment below. */
1385 pp_avs_context->dest_x = dst_rect->x;
1386 pp_avs_context->dest_y = dst_rect->y;
1387 pp_avs_context->dest_w = ALIGN(dst_rect->width, 16);
1388 pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
/* NOTE(review): dividing by both in_w and out_w looks suspicious for a
 * normalized origin (expected (float)x / in_w); confirm against upstream. */
1389 pp_avs_context->src_normalized_x = (float)src_rect->x / in_w / out_w;
1390 pp_avs_context->src_normalized_y = (float)src_rect->y / in_h / out_h;
1391 pp_avs_context->src_w = src_rect->width;
1392 pp_avs_context->src_h = src_rect->height;
/* Kernel parameters: enable non-linear anamorphic scaling (nlas) and set
 * the per-pixel normalized scaling steps for the shader. */
1394 pp_static_parameter.grf4.r4_2.avs.nlas = 1;
1395 pp_static_parameter.grf1.r1_6.normalized_video_y_scaling_step = (float) src_rect->height / in_h / out_h;
1397 pp_inline_parameter.grf5.normalized_video_x_scaling_step = (float) src_rect->width / in_w / out_w;
1398 pp_inline_parameter.grf5.block_count_x = 1; /* M x 1 */
1399 pp_inline_parameter.grf5.number_blocks = pp_avs_context->dest_h / 8;
1400 pp_inline_parameter.grf5.block_vertical_mask = 0xff;
1401 pp_inline_parameter.grf5.block_horizontal_mask = 0xffff;
1402 pp_inline_parameter.grf6.video_step_delta = 0.0;
/*
 * Number of horizontal walker steps for the DN/DI pass.
 * NOTE(review): the body is not visible in this sampled chunk; the DN/DI
 * walker emits one row per y-step (block_count_x covers the row), so this
 * presumably returns 1 — confirm against the full source.
 */
1406 pp_dndi_x_steps(void *private_context)
/*
 * Number of vertical walker steps for the DN/DI pass: the destination is
 * processed in rows of 4 pixels, so dest_h / 4 steps are needed.
 */
1412 pp_dndi_y_steps(void *private_context)
1414 struct pp_dndi_context *pp_dndi_context = private_context;
1416 return pp_dndi_context->dest_h / 4;
/*
 * Per-block inline parameters for the DN/DI pass: each media object covers
 * a 16x4 pixel block, so the destination origin is (x*16, y*4).
 */
1420 pp_dndi_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
1422 pp_inline_parameter.grf5.destination_block_horizontal_origin = x * 16;
1423 pp_inline_parameter.grf5.destination_block_vertical_origin = y * 4;
/*
 * Set up the DN/DI (denoise / deinterlace) post-processing pass for an NV12
 * surface: programs the source, STMM (spatial-temporal motion measure) and
 * destination surface states, fills the DN/DI sampler state, and hooks the
 * DN/DI walker callbacks.
 * NOTE(review): this chunk is lossily sampled — some original lines
 * (dri_bo_alloc arguments, closing braces) are not visible here.
 */
1429 void pp_nv12_dndi_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
1430 const struct i965_surface *src_surface,
1431 const VARectangle *src_rect,
1432 const struct i965_surface *dst_surface,
1433 const VARectangle *dst_rect,
1436 struct i965_driver_data *i965 = i965_driver_data(ctx);
1437 struct pp_dndi_context *pp_dndi_context = (struct pp_dndi_context *)&pp_context->private_context;
1438 struct object_surface *obj_surface;
1439 struct i965_sampler_dndi *sampler_dndi;
1445 obj_surface = SURFACE(src_surface->id);
1446 orig_w = obj_surface->orig_width;
1447 orig_h = obj_surface->orig_height;
1448 w = obj_surface->width;
1449 h = obj_surface->height;
/* Lazily allocate the STMM history buffer used for temporal denoise. */
1451 if (pp_context->stmm.bo == NULL) {
1452 pp_context->stmm.bo = dri_bo_alloc(i965->intel.bufmgr,
1456 assert(pp_context->stmm.bo);
1459 /* source UV surface index 2 */
1460 i965_pp_set_surface_state(ctx, pp_context,
1461 obj_surface->bo, w * h,
1462 orig_w / 4, orig_h / 2, w, I965_SURFACEFORMAT_R8G8_UNORM,
1465 /* source YUV surface index 4 */
1466 i965_pp_set_surface2_state(ctx, pp_context,
1470 SURFACE_FORMAT_PLANAR_420_8, 1,
1473 /* source STMM surface index 20 */
1474 i965_pp_set_surface_state(ctx, pp_context,
1475 pp_context->stmm.bo, 0,
1476 orig_w, orig_h, w, I965_SURFACEFORMAT_R8_UNORM,
1479 /* destination surface */
1480 obj_surface = SURFACE(dst_surface->id);
1481 orig_w = obj_surface->orig_width;
1482 orig_h = obj_surface->orig_height;
1483 w = obj_surface->width;
1484 h = obj_surface->height;
1486 /* destination Y surface index 7 */
1487 i965_pp_set_surface_state(ctx, pp_context,
1489 orig_w / 4, orig_h, w, I965_SURFACEFORMAT_R8_UNORM,
1492 /* destination UV surface index 8 */
1493 i965_pp_set_surface_state(ctx, pp_context,
1494 obj_surface->bo, w * h,
1495 orig_w / 4, orig_h / 2, w, I965_SURFACEFORMAT_R8G8_UNORM,
/* DN/DI sampler state: 8 dwords per the hardware layout. The dw0..dw7
 * values below are denoise/deinterlace tuning constants. */
1498 dri_bo_map(pp_context->sampler_state_table.bo, True);
1499 assert(pp_context->sampler_state_table.bo->virtual);
1500 assert(sizeof(*sampler_dndi) == sizeof(int) * 8);
1501 sampler_dndi = pp_context->sampler_state_table.bo->virtual;
1503 /* sample dndi index 1 */
1505 sampler_dndi[index].dw0.denoise_asd_threshold = 0;
1506 sampler_dndi[index].dw0.denoise_history_delta = 8; // 0-15, default is 8
1507 sampler_dndi[index].dw0.denoise_maximum_history = 128; // 128-240
1508 sampler_dndi[index].dw0.denoise_stad_threshold = 0;
1510 sampler_dndi[index].dw1.denoise_threshold_for_sum_of_complexity_measure = 64;
1511 sampler_dndi[index].dw1.denoise_moving_pixel_threshold = 0;
1512 sampler_dndi[index].dw1.stmm_c2 = 0;
1513 sampler_dndi[index].dw1.low_temporal_difference_threshold = 8;
1514 sampler_dndi[index].dw1.temporal_difference_threshold = 16;
1516 sampler_dndi[index].dw2.block_noise_estimate_noise_threshold = 15; // 0-31
1517 sampler_dndi[index].dw2.block_noise_estimate_edge_threshold = 7; // 0-15
1518 sampler_dndi[index].dw2.denoise_edge_threshold = 7; // 0-15
1519 sampler_dndi[index].dw2.good_neighbor_threshold = 7; // 0-63
1521 sampler_dndi[index].dw3.maximum_stmm = 128;
1522 sampler_dndi[index].dw3.multipler_for_vecm = 2;
1523 sampler_dndi[index].dw3.blending_constant_across_time_for_small_values_of_stmm = 0;
1524 sampler_dndi[index].dw3.blending_constant_across_time_for_large_values_of_stmm = 64;
1525 sampler_dndi[index].dw3.stmm_blending_constant_select = 0;
1527 sampler_dndi[index].dw4.sdi_delta = 8;
1528 sampler_dndi[index].dw4.sdi_threshold = 128;
1529 sampler_dndi[index].dw4.stmm_output_shift = 7; // stmm_max - stmm_min = 2 ^ stmm_output_shift
1530 sampler_dndi[index].dw4.stmm_shift_up = 0;
1531 sampler_dndi[index].dw4.stmm_shift_down = 0;
1532 sampler_dndi[index].dw4.minimum_stmm = 0;
1534 sampler_dndi[index].dw5.fmd_temporal_difference_threshold = 0;
1535 sampler_dndi[index].dw5.sdi_fallback_mode_2_constant = 0;
1536 sampler_dndi[index].dw5.sdi_fallback_mode_1_t2_constant = 0;
1537 sampler_dndi[index].dw5.sdi_fallback_mode_1_t1_constant = 0;
/* dw6: enable both denoise and deinterlace; top-field-first content. */
1539 sampler_dndi[index].dw6.dn_enable = 1;
1540 sampler_dndi[index].dw6.di_enable = 1;
1541 sampler_dndi[index].dw6.di_partial = 0;
1542 sampler_dndi[index].dw6.dndi_top_first = 1;
1543 sampler_dndi[index].dw6.dndi_stream_id = 1;
1544 sampler_dndi[index].dw6.dndi_first_frame = 1;
1545 sampler_dndi[index].dw6.progressive_dn = 0;
1546 sampler_dndi[index].dw6.fmd_tear_threshold = 32;
1547 sampler_dndi[index].dw6.fmd2_vertical_difference_threshold = 32;
1548 sampler_dndi[index].dw6.fmd1_vertical_difference_threshold = 32;
1550 sampler_dndi[index].dw7.fmd_for_1st_field_of_current_frame = 2;
1551 sampler_dndi[index].dw7.fmd_for_2nd_field_of_previous_frame = 1;
1552 sampler_dndi[index].dw7.vdi_walker_enable = 0;
1553 sampler_dndi[index].dw7.column_width_minus1 = w / 16;
1555 dri_bo_unmap(pp_context->sampler_state_table.bo);
1557 /* private function & data */
1558 pp_context->pp_x_steps = pp_dndi_x_steps;
1559 pp_context->pp_y_steps = pp_dndi_y_steps;
1560 pp_context->pp_set_block_parameter = pp_dndi_set_block_parameter;
/* Kernel static parameters (note: "picth" is the field's spelling upstream). */
1562 pp_static_parameter.grf1.statistics_surface_picth = w / 2;
1563 pp_static_parameter.grf1.r1_6.di.top_field_first = 0;
1564 pp_static_parameter.grf4.r4_2.di.motion_history_coefficient_m2 = 64;
1565 pp_static_parameter.grf4.r4_2.di.motion_history_coefficient_m1 = 192;
1567 pp_inline_parameter.grf5.block_count_x = w / 16; /* 1 x N */
1568 pp_inline_parameter.grf5.number_blocks = w / 16;
1569 pp_inline_parameter.grf5.block_vertical_mask = 0xff;
1570 pp_inline_parameter.grf5.block_horizontal_mask = 0xffff;
1572 pp_dndi_context->dest_w = w;
1573 pp_dndi_context->dest_h = h;
/*
 * (Re-)allocate all GPU buffer objects needed by an Ironlake (Gen5)
 * post-processing run — CURBE, binding table, interface descriptors,
 * sampler state tables, VFE state — reset per-surface state, clear the
 * kernel parameter blocks, and invoke the selected pp module's
 * per-pass initializer.
 * NOTE(review): this chunk is lossily sampled — several dri_bo_alloc
 * argument lines and closing braces are not visible here.
 */
1577 ironlake_pp_initialize(
1578 VADriverContextP ctx,
1579 struct i965_post_processing_context *pp_context,
1580 const struct i965_surface *src_surface,
1581 const VARectangle *src_rect,
1582 const struct i965_surface *dst_surface,
1583 const VARectangle *dst_rect,
1588 struct i965_driver_data *i965 = i965_driver_data(ctx);
1589 struct pp_module *pp_module;
/* Each buffer is unreferenced before reallocation so repeated calls
 * do not leak the previous run's BOs. */
1593 dri_bo_unreference(pp_context->curbe.bo);
1594 bo = dri_bo_alloc(i965->intel.bufmgr,
1599 pp_context->curbe.bo = bo;
1601 dri_bo_unreference(pp_context->binding_table.bo);
1602 bo = dri_bo_alloc(i965->intel.bufmgr,
1604 sizeof(unsigned int),
1607 pp_context->binding_table.bo = bo;
1609 dri_bo_unreference(pp_context->idrt.bo);
1610 bo = dri_bo_alloc(i965->intel.bufmgr,
1611 "interface discriptor",
1612 sizeof(struct i965_interface_descriptor),
1615 pp_context->idrt.bo = bo;
1616 pp_context->idrt.num_interface_descriptors = 0;
1618 dri_bo_unreference(pp_context->sampler_state_table.bo);
1619 bo = dri_bo_alloc(i965->intel.bufmgr,
1620 "sampler state table",
/* Zero the sampler state table up front; pass initializers fill it later. */
1624 dri_bo_map(bo, True);
1625 memset(bo->virtual, 0, bo->size);
1627 pp_context->sampler_state_table.bo = bo;
1629 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8);
1630 bo = dri_bo_alloc(i965->intel.bufmgr,
1631 "sampler 8x8 state ",
1635 pp_context->sampler_state_table.bo_8x8 = bo;
1637 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8_uv);
1638 bo = dri_bo_alloc(i965->intel.bufmgr,
1639 "sampler 8x8 state ",
1643 pp_context->sampler_state_table.bo_8x8_uv = bo;
1645 dri_bo_unreference(pp_context->vfe_state.bo);
1646 bo = dri_bo_alloc(i965->intel.bufmgr,
1648 sizeof(struct i965_vfe_state),
1651 pp_context->vfe_state.bo = bo;
/* Drop any surface-state BOs left over from the previous pass. */
1653 for (i = 0; i < MAX_PP_SURFACES; i++) {
1654 dri_bo_unreference(pp_context->surfaces[i].ss_bo);
1655 pp_context->surfaces[i].ss_bo = NULL;
1657 dri_bo_unreference(pp_context->surfaces[i].s_bo);
1658 pp_context->surfaces[i].s_bo = NULL;
/* Start every pass from zeroed kernel parameters, then let the selected
 * module's initialize() fill in pass-specific state. */
1661 memset(&pp_static_parameter, 0, sizeof(pp_static_parameter));
1662 memset(&pp_inline_parameter, 0, sizeof(pp_inline_parameter));
1663 assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1664 pp_context->current_pp = pp_index;
1665 pp_module = &pp_context->pp_modules[pp_index];
1667 if (pp_module->initialize)
1668 pp_module->initialize(ctx, pp_context,
/*
 * Run a complete Ironlake (Gen5) post-processing pass: initialize buffers
 * and the selected pp module, set up indirect states, then build and emit
 * the media pipeline batch.
 */
1677 ironlake_post_processing(
1678 VADriverContextP ctx,
1679 struct i965_post_processing_context *pp_context,
1680 const struct i965_surface *src_surface,
1681 const VARectangle *src_rect,
1682 const struct i965_surface *dst_surface,
1683 const VARectangle *dst_rect,
1688 ironlake_pp_initialize(ctx, pp_context, src_surface, src_rect, dst_surface, dst_rect, pp_index, filter_param);
1689 ironlake_pp_states_setup(ctx, pp_context);
1690 ironlake_pp_pipeline_setup(ctx, pp_context);
/*
 * Gen6/Gen7 counterpart of ironlake_pp_initialize: (re-)allocate the CURBE,
 * binding table, interface descriptors (gen6 layout), sampler state tables
 * and VFE state, reset per-surface state, clear the kernel parameters, and
 * invoke the selected pp module's initializer.
 * NOTE(review): the function's name/return-type line fell in a sampling gap;
 * identified as gen6_pp_initialize by symmetry with the ironlake variant —
 * confirm against the full source.
 */
1695 VADriverContextP ctx,
1696 struct i965_post_processing_context *pp_context,
1697 const struct i965_surface *src_surface,
1698 const VARectangle *src_rect,
1699 const struct i965_surface *dst_surface,
1700 const VARectangle *dst_rect,
1705 struct i965_driver_data *i965 = i965_driver_data(ctx);
1706 struct pp_module *pp_module;
/* Unreference-then-realloc pattern keeps repeated calls leak-free. */
1710 dri_bo_unreference(pp_context->curbe.bo);
1711 bo = dri_bo_alloc(i965->intel.bufmgr,
1716 pp_context->curbe.bo = bo;
1718 dri_bo_unreference(pp_context->binding_table.bo);
1719 bo = dri_bo_alloc(i965->intel.bufmgr,
1721 sizeof(unsigned int),
1724 pp_context->binding_table.bo = bo;
1726 dri_bo_unreference(pp_context->idrt.bo);
1727 bo = dri_bo_alloc(i965->intel.bufmgr,
1728 "interface discriptor",
1729 sizeof(struct gen6_interface_descriptor_data),
1732 pp_context->idrt.bo = bo;
1733 pp_context->idrt.num_interface_descriptors = 0;
1735 dri_bo_unreference(pp_context->sampler_state_table.bo);
1736 bo = dri_bo_alloc(i965->intel.bufmgr,
1737 "sampler state table",
1741 dri_bo_map(bo, True);
1742 memset(bo->virtual, 0, bo->size);
1744 pp_context->sampler_state_table.bo = bo;
1746 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8);
1747 bo = dri_bo_alloc(i965->intel.bufmgr,
1748 "sampler 8x8 state ",
1752 pp_context->sampler_state_table.bo_8x8 = bo;
1754 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8_uv);
1755 bo = dri_bo_alloc(i965->intel.bufmgr,
1756 "sampler 8x8 state ",
1760 pp_context->sampler_state_table.bo_8x8_uv = bo;
1762 dri_bo_unreference(pp_context->vfe_state.bo);
1763 bo = dri_bo_alloc(i965->intel.bufmgr,
1765 sizeof(struct i965_vfe_state),
1768 pp_context->vfe_state.bo = bo;
1770 for (i = 0; i < MAX_PP_SURFACES; i++) {
1771 dri_bo_unreference(pp_context->surfaces[i].ss_bo);
1772 pp_context->surfaces[i].ss_bo = NULL;
1774 dri_bo_unreference(pp_context->surfaces[i].s_bo);
1775 pp_context->surfaces[i].s_bo = NULL;
1778 memset(&pp_static_parameter, 0, sizeof(pp_static_parameter));
1779 memset(&pp_inline_parameter, 0, sizeof(pp_inline_parameter));
1780 assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1781 pp_context->current_pp = pp_index;
1782 pp_module = &pp_context->pp_modules[pp_index];
1784 if (pp_module->initialize)
1785 pp_module->initialize(ctx, pp_context,
/*
 * Build the binding table: write each allocated surface-state BO's GPU
 * offset into the table and emit a relocation so the kernel relocates the
 * entry if the surface-state BO moves.
 */
1794 gen6_pp_binding_table(struct i965_post_processing_context *pp_context)
1796 unsigned int *binding_table;
1797 dri_bo *bo = pp_context->binding_table.bo;
1801 assert(bo->virtual);
1802 binding_table = bo->virtual;
1803 memset(binding_table, 0, bo->size);
/* Only slots with a surface-state BO get an entry; others stay zero. */
1805 for (i = 0; i < MAX_PP_SURFACES; i++) {
1806 if (pp_context->surfaces[i].ss_bo) {
1807 assert(pp_context->surfaces[i].s_bo);
1809 binding_table[i] = pp_context->surfaces[i].ss_bo->offset;
1810 dri_bo_emit_reloc(bo,
1811 I915_GEM_DOMAIN_INSTRUCTION, 0,
1813 i * sizeof(*binding_table),
1814 pp_context->surfaces[i].ss_bo);
/*
 * Fill one gen6 interface descriptor for the currently selected pp kernel:
 * kernel start pointer, sampler state pointer, and binding table pointer,
 * each backed by a relocation so the referenced BOs may move.
 */
1823 gen6_pp_interface_descriptor_table(struct i965_post_processing_context *pp_context)
1825 struct gen6_interface_descriptor_data *desc;
1827 int pp_index = pp_context->current_pp;
1829 bo = pp_context->idrt.bo;
1830 dri_bo_map(bo, True);
1831 assert(bo->virtual);
1833 memset(desc, 0, sizeof(*desc));
/* Pointers are written in the hardware's shifted encodings (>>6, >>5). */
1834 desc->desc0.kernel_start_pointer =
1835 pp_context->pp_modules[pp_index].kernel.bo->offset >> 6; /* reloc */
1836 desc->desc1.single_program_flow = 1;
1837 desc->desc1.floating_point_mode = FLOATING_POINT_IEEE_754;
1838 desc->desc2.sampler_count = 1; /* 1 - 4 samplers used */
1839 desc->desc2.sampler_state_pointer =
1840 pp_context->sampler_state_table.bo->offset >> 5;
1841 desc->desc3.binding_table_entry_count = 0;
1842 desc->desc3.binding_table_pointer =
1843 pp_context->binding_table.bo->offset >> 5; /*reloc */
1844 desc->desc4.constant_urb_entry_read_offset = 0;
1845 desc->desc4.constant_urb_entry_read_length = 4; /* grf 1-4 */
1847 dri_bo_emit_reloc(bo,
1848 I915_GEM_DOMAIN_INSTRUCTION, 0,
1850 offsetof(struct gen6_interface_descriptor_data, desc0),
1851 pp_context->pp_modules[pp_index].kernel.bo);
1853 dri_bo_emit_reloc(bo,
1854 I915_GEM_DOMAIN_INSTRUCTION, 0,
1855 desc->desc2.sampler_count << 2,
1856 offsetof(struct gen6_interface_descriptor_data, desc2),
1857 pp_context->sampler_state_table.bo);
1859 dri_bo_emit_reloc(bo,
1860 I915_GEM_DOMAIN_INSTRUCTION, 0,
1861 desc->desc3.binding_table_entry_count,
1862 offsetof(struct gen6_interface_descriptor_data, desc3),
1863 pp_context->binding_table.bo);
1866 pp_context->idrt.num_interface_descriptors++;
/*
 * Copy the 128-byte static kernel parameter block (grf 1-4) into the CURBE
 * buffer object; the size assert guards the struct layout against drift.
 */
1870 gen6_pp_upload_constants(struct i965_post_processing_context *pp_context)
1872 unsigned char *constant_buffer;
1874 assert(sizeof(pp_static_parameter) == 128);
1875 dri_bo_map(pp_context->curbe.bo, 1);
1876 assert(pp_context->curbe.bo->virtual);
1877 constant_buffer = pp_context->curbe.bo->virtual;
1878 memcpy(constant_buffer, &pp_static_parameter, sizeof(pp_static_parameter));
1879 dri_bo_unmap(pp_context->curbe.bo);
/*
 * Prepare all indirect state for a gen6+ pp run: binding table, interface
 * descriptor table, and CURBE constants.
 */
1883 gen6_pp_states_setup(VADriverContextP ctx,
1884 struct i965_post_processing_context *pp_context)
1886 gen6_pp_binding_table(pp_context);
1887 gen6_pp_interface_descriptor_table(pp_context);
1888 gen6_pp_upload_constants(pp_context);
/*
 * Emit PIPELINE_SELECT to switch the GPU to the media pipeline.
 */
1892 gen6_pp_pipeline_select(VADriverContextP ctx,
1893 struct i965_post_processing_context *pp_context)
1895 struct intel_batchbuffer *batch = pp_context->batch;
1897 BEGIN_BATCH(batch, 1);
1898 OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
1899 ADVANCE_BATCH(batch);
/*
 * Emit STATE_BASE_ADDRESS with every base set to 0 | BASE_ADDRESS_MODIFY,
 * i.e. all indirect-state pointers in this batch are absolute GPU addresses.
 */
1903 gen6_pp_state_base_address(VADriverContextP ctx,
1904 struct i965_post_processing_context *pp_context)
1906 struct intel_batchbuffer *batch = pp_context->batch;
1908 BEGIN_BATCH(batch, 10);
1909 OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (10 - 2));
1910 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1911 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1912 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1913 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1914 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1915 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1916 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1917 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1918 OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1919 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_VFE_STATE: URB entry counts and sizes for the media pipeline.
 * Sizes are expressed in 256-bit units per the command's encoding.
 */
1923 gen6_pp_vfe_state(VADriverContextP ctx,
1924 struct i965_post_processing_context *pp_context)
1926 struct intel_batchbuffer *batch = pp_context->batch;
1928 BEGIN_BATCH(batch, 8);
1929 OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (8 - 2));
1930 OUT_BATCH(batch, 0);
1932 (pp_context->urb.num_vfe_entries - 1) << 16 |
1933 pp_context->urb.num_vfe_entries << 8);
1934 OUT_BATCH(batch, 0);
1936 (pp_context->urb.size_vfe_entry * 2) << 16 | /* in 256 bits unit */
1937 (pp_context->urb.size_cs_entry * pp_context->urb.num_cs_entries * 2 - 1)); /* in 256 bits unit */
1938 OUT_BATCH(batch, 0);
1939 OUT_BATCH(batch, 0);
1940 OUT_BATCH(batch, 0);
1941 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_CURBE_LOAD pointing at the CURBE buffer object; the assert
 * verifies the programmed CURBE length fits inside the allocated BO.
 */
1945 gen6_pp_curbe_load(VADriverContextP ctx,
1946 struct i965_post_processing_context *pp_context)
1948 struct intel_batchbuffer *batch = pp_context->batch;
1950 assert(pp_context->urb.size_cs_entry * pp_context->urb.num_cs_entries * 512 <= pp_context->curbe.bo->size);
1952 BEGIN_BATCH(batch, 4);
1953 OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1954 OUT_BATCH(batch, 0);
1956 pp_context->urb.size_cs_entry * pp_context->urb.num_cs_entries * 512);
1958 pp_context->curbe.bo,
1959 I915_GEM_DOMAIN_INSTRUCTION, 0,
1961 ADVANCE_BATCH(batch);
/*
 * Emit MEDIA_INTERFACE_DESCRIPTOR_LOAD pointing at the interface descriptor
 * table BO, with length covering all descriptors built so far.
 */
1965 gen6_interface_descriptor_load(VADriverContextP ctx,
1966 struct i965_post_processing_context *pp_context)
1968 struct intel_batchbuffer *batch = pp_context->batch;
1970 BEGIN_BATCH(batch, 4);
1971 OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
1972 OUT_BATCH(batch, 0);
1974 pp_context->idrt.num_interface_descriptors * sizeof(struct gen6_interface_descriptor_data));
1976 pp_context->idrt.bo,
1977 I915_GEM_DOMAIN_INSTRUCTION, 0,
1979 ADVANCE_BATCH(batch);
/*
 * Walk the destination in blocks: for each (x, y) step, let the pass's
 * pp_set_block_parameter callback update pp_inline_parameter, then emit a
 * MEDIA_OBJECT carrying the 64-byte inline data (grf 5-6).
 * A non-zero return from the callback skips emitting that block.
 */
1983 gen6_pp_object_walker(VADriverContextP ctx,
1984 struct i965_post_processing_context *pp_context)
1986 struct intel_batchbuffer *batch = pp_context->batch;
1987 int x, x_steps, y, y_steps;
1989 x_steps = pp_context->pp_x_steps(&pp_context->private_context);
1990 y_steps = pp_context->pp_y_steps(&pp_context->private_context);
1992 for (y = 0; y < y_steps; y++) {
1993 for (x = 0; x < x_steps; x++) {
1994 if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1995 BEGIN_BATCH(batch, 22);
1996 OUT_BATCH(batch, CMD_MEDIA_OBJECT | 20);
1997 OUT_BATCH(batch, 0);
1998 OUT_BATCH(batch, 0); /* no indirect data */
1999 OUT_BATCH(batch, 0);
2000 OUT_BATCH(batch, 0); /* scoreboard */
2001 OUT_BATCH(batch, 0);
2003 /* inline data grf 5-6 */
2004 assert(sizeof(pp_inline_parameter) == 64);
2005 intel_batchbuffer_data(batch, &pp_inline_parameter, sizeof(pp_inline_parameter));
2007 ADVANCE_BATCH(batch);
/*
 * Build the whole gen6+ pp batch atomically: flush, select the media
 * pipeline, load CURBE and interface descriptors, program base addresses
 * and VFE state, then walk the destination blocks.
 */
2014 gen6_pp_pipeline_setup(VADriverContextP ctx,
2015 struct i965_post_processing_context *pp_context)
2017 struct intel_batchbuffer *batch = pp_context->batch;
2019 intel_batchbuffer_start_atomic(batch, 0x1000);
2020 intel_batchbuffer_emit_mi_flush(batch);
2021 gen6_pp_pipeline_select(ctx, pp_context);
2022 gen6_pp_curbe_load(ctx, pp_context);
2023 gen6_interface_descriptor_load(ctx, pp_context);
2024 gen6_pp_state_base_address(ctx, pp_context);
2025 gen6_pp_vfe_state(ctx, pp_context);
2026 gen6_pp_object_walker(ctx, pp_context);
2027 intel_batchbuffer_end_atomic(batch);
/*
 * Run a complete Gen6/Gen7 post-processing pass: initialize buffers and the
 * selected pp module, set up indirect states, then build the pipeline batch.
 */
2031 gen6_post_processing(
2032 VADriverContextP ctx,
2033 struct i965_post_processing_context *pp_context,
2034 const struct i965_surface *src_surface,
2035 const VARectangle *src_rect,
2036 const struct i965_surface *dst_surface,
2037 const VARectangle *dst_rect,
2042 gen6_pp_initialize(ctx, pp_context,
2049 gen6_pp_states_setup(ctx, pp_context);
2050 gen6_pp_pipeline_setup(ctx, pp_context);
/*
 * Dispatch a pp pass to the generation-specific implementation:
 * Gen6/Gen7 use the gen6 media pipeline, everything else (Ironlake,
 * per the HAS_PP gate) uses the ironlake path.
 */
2054 i965_post_processing_internal(
2055 VADriverContextP ctx,
2056 struct i965_post_processing_context *pp_context,
2057 const struct i965_surface *src_surface,
2058 const VARectangle *src_rect,
2059 const struct i965_surface *dst_surface,
2060 const VARectangle *dst_rect,
2065 struct i965_driver_data *i965 = i965_driver_data(ctx);
2067 if (IS_GEN6(i965->intel.device_id) ||
2068 IS_GEN7(i965->intel.device_id))
2069 gen6_post_processing(ctx, pp_context, src_surface, src_rect, dst_surface, dst_rect, pp_index, filter_param);
2071 ironlake_post_processing(ctx, pp_context, src_surface, src_rect, dst_surface, dst_rect, pp_index, filter_param);
/* Forward declarations for the surface create/destroy entry points defined
 * elsewhere in the driver, used by i965_post_processing() below. */
2075 i965_DestroySurfaces(VADriverContextP ctx,
2076 VASurfaceID *surface_list,
2079 i965_CreateSurfaces(VADriverContextP ctx,
2084 VASurfaceID *surfaces);
/*
 * Top-level display-path post processing for an NV12 surface. Optionally
 * runs a deinterlacing (DN/DI) pass and then an AVS scaling pass to the
 * draw region, chaining through intermediate surfaces it creates.
 * Returns the resulting surface id (VA_INVALID_ID when nothing was done)
 * and sets *has_done_scaling when the AVS pass handled scaling.
 * The caller owns the returned surface.
 * NOTE(review): sampling gaps hide the HAS_PP guard and some call
 * arguments; comments below describe only the visible flow.
 */
2086 i965_post_processing(
2087 VADriverContextP ctx,
2088 VASurfaceID surface,
2089 const VARectangle *src_rect,
2090 const VARectangle *dst_rect,
2092 int *has_done_scaling
2095 struct i965_driver_data *i965 = i965_driver_data(ctx);
2096 VASurfaceID in_surface_id = surface;
2097 VASurfaceID out_surface_id = VA_INVALID_ID;
2099 *has_done_scaling = 0;
2102 struct object_surface *obj_surface;
2104 struct i965_surface src_surface;
2105 struct i965_surface dst_surface;
2107 obj_surface = SURFACE(in_surface_id);
2109 /* Currently only support post processing for NV12 surface */
2110 if (obj_surface->fourcc != VA_FOURCC('N', 'V', '1', '2'))
2111 return out_surface_id;
/* Optional pass 1: deinterlace into a freshly created NV12 surface. */
2113 if (flags & I965_PP_FLAG_DEINTERLACING) {
2114 status = i965_CreateSurfaces(ctx,
2115 obj_surface->orig_width,
2116 obj_surface->orig_height,
2117 VA_RT_FORMAT_YUV420,
2120 assert(status == VA_STATUS_SUCCESS);
2121 obj_surface = SURFACE(out_surface_id);
2122 i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
2124 src_surface.id = in_surface_id;
2125 src_surface.flag = I965_SURFACE_SURFACE;
2126 dst_surface.id = out_surface_id;
2127 dst_surface.flag = I965_SURFACE_SURFACE;
2129 i965_post_processing_internal(ctx, i965->pp_context,
/* Optional pass 2: AVS-scale (possibly the deinterlaced result) to the
 * draw region's dimensions. */
2138 if (flags & I965_PP_FLAG_AVS) {
2139 struct i965_render_state *render_state = &i965->render_state;
2140 struct intel_region *dest_region = render_state->draw_region;
2142 if (out_surface_id != VA_INVALID_ID)
2143 in_surface_id = out_surface_id;
2145 status = i965_CreateSurfaces(ctx,
2147 dest_region->height,
2148 VA_RT_FORMAT_YUV420,
2151 assert(status == VA_STATUS_SUCCESS);
2152 obj_surface = SURFACE(out_surface_id);
2153 i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
2155 src_surface.id = in_surface_id;
2156 src_surface.flag = I965_SURFACE_SURFACE;
2157 dst_surface.id = out_surface_id;
2158 dst_surface.flag = I965_SURFACE_SURFACE;
2160 i965_post_processing_internal(ctx, i965->pp_context,
/* Destroy the intermediate deinterlaced surface once it has been consumed. */
2168 if (in_surface_id != surface)
2169 i965_DestroySurfaces(ctx, &in_surface_id, 1);
2171 *has_done_scaling = 1;
2175 return out_surface_id;
/*
 * i965_image_i420_processing:
 * Copies/converts a planar YUV (I420/YV12, "PL3") source into the destination
 * surface, choosing the GPU kernel by the destination fourcc:
 *   - NV12 destination -> PP_PL3_LOAD_SAVE_N12 (plane interleave to NV12)
 *   - otherwise        -> PP_PL3_LOAD_SAVE_PL3 (planar-to-planar copy)
 * Flushes the batch buffer so the operation is submitted before returning.
 *
 * NOTE(review): some call-argument lines of i965_post_processing_internal()
 * are missing from this excerpt.
 */
2179 i965_image_i420_processing(VADriverContextP ctx,
2180 const struct i965_surface *src_surface,
2181 const VARectangle *src_rect,
2182 const struct i965_surface *dst_surface,
2183 const VARectangle *dst_rect)
2185 struct i965_driver_data *i965 = i965_driver_data(ctx);
2186 struct i965_post_processing_context *pp_context = i965->pp_context;
2187 int fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2189 if (fourcc == VA_FOURCC('N', 'V', '1', '2')) {
2190 i965_post_processing_internal(ctx, i965->pp_context,
2195 PP_PL3_LOAD_SAVE_N12,
2198 i965_post_processing_internal(ctx, i965->pp_context,
2203 PP_PL3_LOAD_SAVE_PL3,
/* Submit the queued GPU commands now. */
2207 intel_batchbuffer_flush(pp_context->batch);
2209 return VA_STATUS_SUCCESS;
/*
 * i965_image_nv12_processing:
 * Copies/converts an NV12 source into the destination surface, choosing the
 * GPU kernel by the destination fourcc:
 *   - NV12 destination -> PP_NV12_LOAD_SAVE_N12 (NV12-to-NV12 copy)
 *   - otherwise        -> PP_NV12_LOAD_SAVE_PL3 (de-interleave to planar)
 * Mirrors i965_image_i420_processing() for the NV12 source case.
 *
 * NOTE(review): some call-argument lines of i965_post_processing_internal()
 * are missing from this excerpt.
 */
2213 i965_image_nv12_processing(VADriverContextP ctx,
2214 const struct i965_surface *src_surface,
2215 const VARectangle *src_rect,
2216 const struct i965_surface *dst_surface,
2217 const VARectangle *dst_rect)
2219 struct i965_driver_data *i965 = i965_driver_data(ctx);
2220 struct i965_post_processing_context *pp_context = i965->pp_context;
2221 int fourcc = pp_get_surface_fourcc(ctx, dst_surface);
2223 if (fourcc == VA_FOURCC('N', 'V', '1', '2')) {
2224 i965_post_processing_internal(ctx, i965->pp_context,
2229 PP_NV12_LOAD_SAVE_N12,
2232 i965_post_processing_internal(ctx, i965->pp_context,
2237 PP_NV12_LOAD_SAVE_PL3,
/* Submit the queued GPU commands now. */
2241 intel_batchbuffer_flush(pp_context->batch);
2243 return VA_STATUS_SUCCESS;
/*
 * i965_image_processing:
 * Public image copy/convert entry point.  Dispatches on the *source* fourcc:
 * planar YV12/I420 sources go to i965_image_i420_processing(), NV12 sources
 * to i965_image_nv12_processing(); anything else returns
 * VA_STATUS_ERROR_UNIMPLEMENTED.
 *
 * NOTE(review): the switch header, call arguments and closing braces are on
 * lines missing from this excerpt.
 */
2247 i965_image_processing(VADriverContextP ctx,
2248 const struct i965_surface *src_surface,
2249 const VARectangle *src_rect,
2250 const struct i965_surface *dst_surface,
2251 const VARectangle *dst_rect)
2253 struct i965_driver_data *i965 = i965_driver_data(ctx);
2254 VAStatus status = VA_STATUS_ERROR_UNIMPLEMENTED;
2257 int fourcc = pp_get_surface_fourcc(ctx, src_surface);
/* YV12 and I420 share the same planar-source path. */
2260 case VA_FOURCC('Y', 'V', '1', '2'):
2261 case VA_FOURCC('I', '4', '2', '0'):
2262 status = i965_image_i420_processing(ctx,
2269 case VA_FOURCC('N', 'V', '1', '2'):
2270 status = i965_image_nv12_processing(ctx,
/* Unsupported source format. */
2278 status = VA_STATUS_ERROR_UNIMPLEMENTED;
/*
 * i965_post_processing_context_finalize:
 * Releases every GPU buffer object owned by a post-processing context
 * (CURBE, surface states, sampler state tables, binding table, interface
 * descriptors, VFE state, STMM buffer, and the per-module kernel binaries)
 * and NULLs each pointer to make repeated finalization safe.
 * Does not free pp_context itself — that is the caller's job.
 *
 * NOTE(review): dri_bo_unreference() is presumed NULL-safe here (libdrm
 * convention) since no bo is checked before unreferencing — confirm.
 */
2287 i965_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
2291 dri_bo_unreference(pp_context->curbe.bo);
2292 pp_context->curbe.bo = NULL;
/* Drop both the surface-state and the surface buffer for every slot. */
2294 for (i = 0; i < MAX_PP_SURFACES; i++) {
2295 dri_bo_unreference(pp_context->surfaces[i].ss_bo);
2296 pp_context->surfaces[i].ss_bo = NULL;
2298 dri_bo_unreference(pp_context->surfaces[i].s_bo);
2299 pp_context->surfaces[i].s_bo = NULL;
2302 dri_bo_unreference(pp_context->sampler_state_table.bo);
2303 pp_context->sampler_state_table.bo = NULL;
2305 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8);
2306 pp_context->sampler_state_table.bo_8x8 = NULL;
2308 dri_bo_unreference(pp_context->sampler_state_table.bo_8x8_uv);
2309 pp_context->sampler_state_table.bo_8x8_uv = NULL;
2311 dri_bo_unreference(pp_context->binding_table.bo);
2312 pp_context->binding_table.bo = NULL;
2314 dri_bo_unreference(pp_context->idrt.bo);
2315 pp_context->idrt.bo = NULL;
2316 pp_context->idrt.num_interface_descriptors = 0;
2318 dri_bo_unreference(pp_context->vfe_state.bo);
2319 pp_context->vfe_state.bo = NULL;
2321 dri_bo_unreference(pp_context->stmm.bo);
2322 pp_context->stmm.bo = NULL;
/* Release the uploaded kernel binaries for all PP modules. */
2324 for (i = 0; i < NUM_PP_MODULES; i++) {
2325 struct pp_module *pp_module = &pp_context->pp_modules[i];
2327 dri_bo_unreference(pp_module->kernel.bo);
2328 pp_module->kernel.bo = NULL;
/*
 * i965_post_processing_terminate:
 * Tears down the driver-wide post-processing context: finalizes its GPU
 * resources and clears the driver's pointer.
 * NOTE(review): lines missing from this excerpt presumably guard against a
 * NULL pp_context and free() it — confirm against the full source.
 */
2334 i965_post_processing_terminate(VADriverContextP ctx)
2336 struct i965_driver_data *i965 = i965_driver_data(ctx);
2337 struct i965_post_processing_context *pp_context = i965->pp_context;
2340 i965_post_processing_context_finalize(pp_context);
2344 i965->pp_context = NULL;
/*
 * i965_post_processing_context_init:
 * Initializes a post-processing context: configures the URB partitioning
 * (VFE vs. CS entries), selects the kernel set matching the GPU generation
 * (Gen6/Gen7 vs. Ironlake), uploads each kernel binary into its own buffer
 * object, and attaches the batch buffer used for command submission.
 *
 * NOTE(review): `i` is declared on a line missing from this excerpt, as are
 * the dri_bo_alloc alignment argument and some closing braces.
 */
2350 i965_post_processing_context_init(VADriverContextP ctx,
2351 struct i965_post_processing_context *pp_context,
2352 struct intel_batchbuffer *batch)
2354 struct i965_driver_data *i965 = i965_driver_data(ctx);
/* URB layout: 32 VFE entries then 1 constant (CS) entry; sizes are in
 * 512-bit units. */
2357 pp_context->urb.size = URB_SIZE((&i965->intel));
2358 pp_context->urb.num_vfe_entries = 32;
2359 pp_context->urb.size_vfe_entry = 1; /* in 512 bits unit */
2360 pp_context->urb.num_cs_entries = 1;
2361 pp_context->urb.size_cs_entry = 2; /* in 512 bits unit */
2362 pp_context->urb.vfe_start = 0;
2363 pp_context->urb.cs_start = pp_context->urb.vfe_start +
2364 pp_context->urb.num_vfe_entries * pp_context->urb.size_vfe_entry;
/* The whole allocation must fit inside the hardware URB. */
2365 assert(pp_context->urb.cs_start +
2366 pp_context->urb.num_cs_entries * pp_context->urb.size_cs_entry <= URB_SIZE((&i965->intel)));
/* Both kernel tables must define one entry per PP module. */
2368 assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen5));
2369 assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen6));
/* Pick the kernel binaries matching this GPU generation. */
2371 if (IS_GEN6(i965->intel.device_id) ||
2372 IS_GEN7(i965->intel.device_id))
2373 memcpy(pp_context->pp_modules, pp_modules_gen6, sizeof(pp_context->pp_modules));
2374 else if (IS_IRONLAKE(i965->intel.device_id))
2375 memcpy(pp_context->pp_modules, pp_modules_gen5, sizeof(pp_context->pp_modules));
/* Upload each kernel binary into its own bo; modules without a binary
 * (e.g. unsupported on this generation) get a NULL kernel bo. */
2377 for (i = 0; i < NUM_PP_MODULES; i++) {
2378 struct pp_module *pp_module = &pp_context->pp_modules[i];
2379 dri_bo_unreference(pp_module->kernel.bo);
2380 if (pp_module->kernel.bin) {
2381 pp_module->kernel.bo = dri_bo_alloc(i965->intel.bufmgr,
2382 pp_module->kernel.name,
2383 pp_module->kernel.size,
2385 assert(pp_module->kernel.bo);
2386 dri_bo_subdata(pp_module->kernel.bo, 0, pp_module->kernel.size, pp_module->kernel.bin);
2388 pp_module->kernel.bo = NULL;
2392 pp_context->batch = batch;
/*
 * i965_post_processing_init:
 * Lazily allocates and initializes the driver-wide post-processing context
 * (no-op if it already exists), sharing the driver's batch buffer.
 * NOTE(review): the calloc() result is not checked before use — an OOM here
 * would crash inside i965_post_processing_context_init().
 */
2396 i965_post_processing_init(VADriverContextP ctx)
2398 struct i965_driver_data *i965 = i965_driver_data(ctx);
2399 struct i965_post_processing_context *pp_context = i965->pp_context;
2402 if (pp_context == NULL) {
2403 pp_context = calloc(1, sizeof(*pp_context));
2404 i965_post_processing_context_init(ctx, pp_context, i965->batch);
2405 i965->pp_context = pp_context;
/* Maps each VAProcFilterType (used as the array index) to the PP kernel that
 * implements it.  PP_NULL marks filters this driver does not accelerate;
 * only noise reduction and deinterlacing are wired to a real kernel
 * (both to the combined DN/DI kernel PP_NV12_DNDI). */
2412 static const int procfilter_to_pp_flag[10] = {
2413 PP_NULL, /* VAProcFilterNone */
2414 PP_NULL, /* VAProcFilterDering */
2415 PP_NULL, /* VAProcFilterDeblocking */
2416 PP_NV12_DNDI, /* VAProcFilterNoiseReduction */
2417 PP_NV12_DNDI, /* VAProcFilterDeinterlacing */
2418 PP_NULL, /* VAProcFilterSharpening */
2419 PP_NULL, /* VAProcFilterColorEnhancement */
2420 PP_NULL, /* VAProcFilterProcAmp */
2421 PP_NULL, /* VAProcFilterComposition */
2422 PP_NULL, /* VAProcFilterFrameRateConversion */
/*
 * i965_proc_picture:
 * Executes a VA video-processing pipeline on the input surface: each
 * supported filter in pipeline_param->filter_pipeline runs into a freshly
 * created temporary NV12 surface (chained as the next filter's input), then
 * a final pass writes/scales into the current render target.  Temporary
 * surfaces are destroyed afterwards and the batch is flushed.
 *
 * NOTE(review): several original lines are missing from this excerpt
 * (declarations of `i`/`status`, some call arguments, return statement,
 * closing braces).  SURFACE() results are dereferenced without NULL checks
 * and filter_type is used to index procfilter_to_pp_flag[10] without a
 * visible range check — presumably validated upstream; confirm.
 */
2426 i965_proc_picture(VADriverContextP ctx,
2428 union codec_state *codec_state,
2429 struct hw_context *hw_context)
2431 struct i965_driver_data *i965 = i965_driver_data(ctx);
2432 struct i965_proc_context *proc_context = (struct i965_proc_context *)hw_context;
2433 struct proc_state *proc_state = &codec_state->proc;
2434 VAProcPipelineParameterBuffer *pipeline_param = (VAProcPipelineParameterBuffer *)proc_state->pipeline_param->buffer;
2435 VAProcInputParameterBuffer *input_param = (VAProcInputParameterBuffer *)proc_state->input_param->buffer;
2436 struct object_surface *obj_surface;
2437 struct i965_surface src_surface, dst_surface;
/* Track every temporary surface created below so it can be destroyed once
 * the pipeline has finished. */
2440 VASurfaceID tmp_surfaces[VA_PROC_PIPELINE_MAX_NUM_FILTERS];
2441 int num_tmp_surfaces = 0;
2443 assert(input_param->surface != VA_INVALID_ID);
2444 assert(proc_state->current_render_target != VA_INVALID_ID);
/* Make sure the render target has an NV12 bo before writing into it. */
2446 obj_surface = SURFACE(proc_state->current_render_target);
2447 i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
/* Only NV12 input is supported by the PP kernels. */
2449 obj_surface = SURFACE(input_param->surface);
2450 assert(obj_surface->fourcc == VA_FOURCC('N', 'V', '1', '2'));
2452 src_surface.id = input_param->surface;
2453 src_surface.flag = I965_SURFACE_SURFACE;
/* Run each accelerated filter, chaining its output as the next input. */
2455 for (i = 0; i < VA_PROC_PIPELINE_MAX_NUM_FILTERS; i++) {
2456 VAProcFilterType filter_type = pipeline_param->filter_pipeline[i];
2457 VASurfaceID out_surface_id = VA_INVALID_ID;
2458 void *filter_param = NULL;
2460 if (procfilter_to_pp_flag[filter_type] != PP_NULL) {
2461 if (proc_state->filter_param[filter_type])
2462 filter_param = proc_state->filter_param[filter_type]->buffer;
2464 status = i965_CreateSurfaces(ctx,
2465 obj_surface->orig_width,
2466 obj_surface->orig_height,
2467 VA_RT_FORMAT_YUV420,
2470 assert(status == VA_STATUS_SUCCESS);
2471 tmp_surfaces[num_tmp_surfaces++] = out_surface_id;
2472 obj_surface = SURFACE(out_surface_id);
2473 i965_check_alloc_surface_bo(ctx, obj_surface, 0, VA_FOURCC('N','V','1','2'));
2474 dst_surface.id = out_surface_id;
2475 dst_surface.flag = I965_SURFACE_SURFACE;
2476 i965_post_processing_internal(ctx, &proc_context->pp_context,
2478 &input_param->region,
2480 &input_param->region,
2481 procfilter_to_pp_flag[filter_type],
/* This filter's output becomes the next filter's input. */
2483 src_surface.id = dst_surface.id;
/* Final pass: copy/scale the pipeline result into the render target,
 * honoring the requested output region. */
2487 dst_surface.id = proc_state->current_render_target;
2488 dst_surface.flag = I965_SURFACE_SURFACE;
2489 i965_post_processing_internal(ctx, &proc_context->pp_context,
2491 &input_param->region,
2493 &pipeline_param->output_region,
/* Release all intermediate surfaces created above. */
2497 if (num_tmp_surfaces)
2498 i965_DestroySurfaces(ctx,
2502 intel_batchbuffer_flush(hw_context->batch);
/*
 * i965_proc_context_destroy:
 * hw_context destroy hook for the proc pipeline: releases the embedded
 * post-processing context's GPU resources and frees its batch buffer.
 * NOTE(review): the free(proc_context) presumably follows on a line missing
 * from this excerpt — confirm against the full source.
 */
2506 i965_proc_context_destroy(void *hw_context)
2508 struct i965_proc_context *proc_context = (struct i965_proc_context *)hw_context;
2510 i965_post_processing_context_finalize(&proc_context->pp_context);
2511 intel_batchbuffer_free(proc_context->base.batch);
2516 i965_proc_context_init(VADriverContextP ctx, VAProfile profile)
2518 struct intel_driver_data *intel = intel_driver_data(ctx);
2519 struct i965_proc_context *proc_context = calloc(1, sizeof(struct i965_proc_context));
2521 proc_context->base.destroy = i965_proc_context_destroy;
2522 proc_context->base.run = i965_proc_picture;
2523 proc_context->base.batch = intel_batchbuffer_new(intel, I915_EXEC_RENDER);
2524 i965_post_processing_context_init(ctx, &proc_context->pp_context, proc_context->base.batch);
2526 return (struct hw_context *)proc_context;