VPP: Simplify surface state setting for csc and scaling on IVB/HSW/BDW
[platform/upstream/libva-intel-driver.git] / src / gen8_post_processing.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "intel_media.h"
43
/* Per-entry stride used for the Gen8 SURFACE_STATE array. */
#define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8

/* Byte offset of the index-th SURFACE_STATE inside the combined
 * surface-state/binding-table buffer.  The argument is parenthesized so
 * expressions like SURFACE_STATE_OFFSET(base + 1) expand correctly. */
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * (index))
/* The binding table lives immediately after the last surface state. */
#define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)

/* Geometry of one GPU kernel dispatch block. */
#define GPU_ASM_BLOCK_WIDTH         16
#define GPU_ASM_BLOCK_HEIGHT        8
/* Destination x offsets must be dword (4-pixel) aligned. */
#define GPU_ASM_X_OFFSET_ALIGNMENT  4

/* NOTE(review): appears unused in this file — TODO confirm before removing. */
#define VA_STATUS_SUCCESS_1                     0xFFFFFFFE
54
/* Forward declaration: no-op initializer used for filters that have no
 * Gen8 kernel yet (see pp_modules_gen8 below). */
static VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                   const struct i965_surface *src_surface,
                                   const VARectangle *src_rect,
                                   struct i965_surface *dst_surface,
                                   const VARectangle *dst_rect,
                                   void *filter_param);

/* Forward declaration: shared initializer for all load/save, scaling and
 * AVS modules in pp_modules_gen8. */
static VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                           const struct i965_surface *src_surface,
                                           const VARectangle *src_rect,
                                           struct i965_surface *dst_surface,
                                           const VARectangle *dst_rect,
                                           void *filter_param);
68
/* Pre-compiled Gen8 media kernels.  Each table is the binary for one
 * source-format to destination-format conversion ("pl2" = two-plane
 * NV12-style, "pl3" = three-plane I420-style, "pa" = packed YUV,
 * "rgbx" = packed RGB). */

/* TODO: Modify the shader and then compile it again.
 * Currently it is derived from Haswell*/
static const uint32_t pp_null_gen8[][4] = {
};

static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
};

static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
};

static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
};

/* Scaling and AVS reuse the generic pl2->pl2 kernel. */
static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

/* DNDI/DN kernels are not ported to Gen8 yet; the tables are empty and the
 * corresponding modules use pp_null_initialize. */
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
};

static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
};
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
};
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
};
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
};
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
};
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
};
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
};
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
};
126
127 static struct pp_module pp_modules_gen8[] = {
128     {
129         {
130             "NULL module (for testing)",
131             PP_NULL,
132             pp_null_gen8,
133             sizeof(pp_null_gen8),
134             NULL,
135         },
136
137         pp_null_initialize,
138     },
139
140     {
141         {
142             "NV12_NV12",
143             PP_NV12_LOAD_SAVE_N12,
144             pp_nv12_load_save_nv12_gen8,
145             sizeof(pp_nv12_load_save_nv12_gen8),
146             NULL,
147         },
148
149         gen8_pp_plx_avs_initialize,
150     },
151
152     {
153         {
154             "NV12_PL3",
155             PP_NV12_LOAD_SAVE_PL3,
156             pp_nv12_load_save_pl3_gen8,
157             sizeof(pp_nv12_load_save_pl3_gen8),
158             NULL,
159         },
160         gen8_pp_plx_avs_initialize,
161     },
162
163     {
164         {
165             "PL3_NV12",
166             PP_PL3_LOAD_SAVE_N12,
167             pp_pl3_load_save_nv12_gen8,
168             sizeof(pp_pl3_load_save_nv12_gen8),
169             NULL,
170         },
171
172         gen8_pp_plx_avs_initialize,
173     },
174
175     {
176         {
177             "PL3_PL3",
178             PP_PL3_LOAD_SAVE_N12,
179             pp_pl3_load_save_pl3_gen8,
180             sizeof(pp_pl3_load_save_pl3_gen8),
181             NULL,
182         },
183
184         gen8_pp_plx_avs_initialize,
185     },
186
187     {
188         {
189             "NV12 Scaling module",
190             PP_NV12_SCALING,
191             pp_nv12_scaling_gen8,
192             sizeof(pp_nv12_scaling_gen8),
193             NULL,
194         },
195
196         gen8_pp_plx_avs_initialize,
197     },
198
199     {
200         {
201             "NV12 AVS module",
202             PP_NV12_AVS,
203             pp_nv12_avs_gen8,
204             sizeof(pp_nv12_avs_gen8),
205             NULL,
206         },
207
208         gen8_pp_plx_avs_initialize,
209     },
210
211     {
212         {
213             "NV12 DNDI module",
214             PP_NV12_DNDI,
215             pp_nv12_dndi_gen8,
216             sizeof(pp_nv12_dndi_gen8),
217             NULL,
218         },
219
220         pp_null_initialize,
221     },
222
223     {
224         {
225             "NV12 DN module",
226             PP_NV12_DN,
227             pp_nv12_dn_gen8,
228             sizeof(pp_nv12_dn_gen8),
229             NULL,
230         },
231
232         pp_null_initialize,
233     },
234     {
235         {
236             "NV12_PA module",
237             PP_NV12_LOAD_SAVE_PA,
238             pp_nv12_load_save_pa_gen8,
239             sizeof(pp_nv12_load_save_pa_gen8),
240             NULL,
241         },
242
243         gen8_pp_plx_avs_initialize,
244     },
245
246     {
247         {
248             "PL3_PA module",
249             PP_PL3_LOAD_SAVE_PA,
250             pp_pl3_load_save_pa_gen8,
251             sizeof(pp_pl3_load_save_pa_gen8),
252             NULL,
253         },
254
255         gen8_pp_plx_avs_initialize,
256     },
257
258     {
259         {
260             "PA_NV12 module",
261             PP_PA_LOAD_SAVE_NV12,
262             pp_pa_load_save_nv12_gen8,
263             sizeof(pp_pa_load_save_nv12_gen8),
264             NULL,
265         },
266
267         gen8_pp_plx_avs_initialize,
268     },
269
270     {
271         {
272             "PA_PL3 module",
273             PP_PA_LOAD_SAVE_PL3,
274             pp_pa_load_save_pl3_gen8,
275             sizeof(pp_pa_load_save_pl3_gen8),
276             NULL,
277         },
278
279         gen8_pp_plx_avs_initialize,
280     },
281
282     {
283         {
284             "PA_PA module",
285             PP_PA_LOAD_SAVE_PA,
286             pp_pa_load_save_pa_gen8,
287             sizeof(pp_pa_load_save_pa_gen8),
288             NULL,
289         },
290
291         gen8_pp_plx_avs_initialize,
292     },
293
294     {
295         {
296             "RGBX_NV12 module",
297             PP_RGBX_LOAD_SAVE_NV12,
298             pp_rgbx_load_save_nv12_gen8,
299             sizeof(pp_rgbx_load_save_nv12_gen8),
300             NULL,
301         },
302
303         gen8_pp_plx_avs_initialize,
304     },
305
306     {
307         {
308             "NV12_RGBX module",
309             PP_NV12_LOAD_SAVE_RGBX,
310             pp_nv12_load_save_rgbx_gen8,
311             sizeof(pp_nv12_load_save_rgbx_gen8),
312             NULL,
313         },
314
315         gen8_pp_plx_avs_initialize,
316     },
317 };
318
319 static int
320 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
321 {
322     int fourcc;
323
324     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
325         struct object_image *obj_image = (struct object_image *)surface->base;
326         fourcc = obj_image->image.format.fourcc;
327     } else {
328         struct object_surface *obj_surface = (struct object_surface *)surface->base;
329         fourcc = obj_surface->fourcc;
330     }
331
332     return fourcc;
333 }
334
335 static void
336 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
337 {
338     switch (tiling) {
339     case I915_TILING_NONE:
340         ss->ss0.tiled_surface = 0;
341         ss->ss0.tile_walk = 0;
342         break;
343     case I915_TILING_X:
344         ss->ss0.tiled_surface = 1;
345         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
346         break;
347     case I915_TILING_Y:
348         ss->ss0.tiled_surface = 1;
349         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
350         break;
351     }
352 }
353
354 static void
355 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
356 {
357     switch (tiling) {
358     case I915_TILING_NONE:
359         ss->ss2.tiled_surface = 0;
360         ss->ss2.tile_walk = 0;
361         break;
362     case I915_TILING_X:
363         ss->ss2.tiled_surface = 1;
364         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
365         break;
366     case I915_TILING_Y:
367         ss->ss2.tiled_surface = 1;
368         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
369         break;
370     }
371 }
372
373
/* Fill the index-th Gen8 SURFACE_STATE for a 2D (render-target style)
 * surface and point the binding-table slot at it.
 *
 * surf_bo/surf_bo_offset: backing buffer and byte offset of the plane.
 * width/height/pitch:     plane dimensions in the units of 'format'.
 * is_target:              non-zero when the kernel writes this surface
 *                         (adds a RENDER write domain to the reloc).
 */
static void
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
{
    struct gen8_surface_state *ss;
    dri_bo *ss_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    assert(ss_bo);

    /* Map for writing; states and the binding table share this buffer. */
    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    /* Hardware fields are zero-based. */
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;

    /* Always set 1(align 4 mode) per B-spec */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
    /* Relocate the base-address dword (ss8) so the kernel sees the final
     * GPU offset of surf_bo at execbuffer time. */
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
                      surf_bo);
    /* Binding-table slot 'index' points at this surface state. */
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}
414
415
/* Fill the index-th SURFACE_STATE (media/sampler_8x8 variant) for a source
 * surface and point the binding-table slot at it.
 *
 * wpitch:            surface pitch in bytes.
 * xoffset/yoffset:   Cb plane offsets within the surface (NV12-style).
 * interleave_chroma: non-zero for interleaved CbCr (e.g. NV12).
 */
static void
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
                           int index)
{
    struct gen8_surface_state2 *ss2;
    dri_bo *ss2_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    assert(ss2_bo);

    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));
    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    /* Hardware fields are zero-based. */
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    /* Source surfaces are read-only: no write domain on the reloc. */
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
                      surf_bo);
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
}
455
/* Describe one surface (source or destination) to the media kernel:
 * gather per-plane width/height/pitch/offset into the out arrays and emit
 * the matching SURFACE_STATE entries starting at base_index.
 *
 * Targets use the 2D surface state (written as raw R8/R8G8 views); sources
 * use the sampler_8x8 surface state with a real pixel format.
 *
 * NOTE(review): if the fourcc is unknown (fourcc_info == NULL) this
 * returns early and leaves width/height/pitch/offset unset — callers read
 * width[0]/height[0] afterwards, so they rely on the fourcc being valid.
 */
static void
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     int *width, int *height, int *pitch, int *offset)
{
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    dri_bo *bo;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    const i965_fourcc_info *fourcc_info = get_fourcc_info(fourcc);

    if (fourcc_info == NULL)
        return;

    /* Collect plane geometry: [0] = Y/packed plane, [1] = U/CbCr, [2] = V. */
    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        width[0] = obj_surface->orig_width;
        height[0] = obj_surface->orig_height;
        pitch[0] = obj_surface->width;
        offset[0] = 0;

        if (fourcc_info->num_planes == 1 && is_target)
            width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */

        width[1] = obj_surface->cb_cr_width;
        height[1] = obj_surface->cb_cr_height;
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;

        width[2] = obj_surface->cb_cr_width;
        height[2] = obj_surface->cb_cr_height;
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
    } else {
        /* U/V hold the image plane index of the Cb and Cr components. */
        int U = 0, V = 0;

        /* FIXME: add support for ARGB/ABGR image */
        obj_image = (struct object_image *)surface->base;
        bo = obj_image->bo;
        width[0] = obj_image->image.width;
        height[0] = obj_image->image.height;
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];

        if (fourcc_info->num_planes == 1) {
            if (is_target)
                width[0] = width[0] * (fourcc_info->bpp[0] / 8); /* surface format is R8 */
        } else if (fourcc_info->num_planes == 2) {
            U = 1, V = 1;
        } else {
            assert(fourcc_info->num_components == 3);

            U = fourcc_info->components[1].plane;
            V = fourcc_info->components[2].plane;
            assert((U == 1 && V == 2) ||
                   (U == 2 && V == 1));
        }

        /* Always set width/height although they aren't used for fourcc_info->num_planes == 1 */
        width[1] = obj_image->image.width / fourcc_info->hfactor;
        height[1] = obj_image->image.height / fourcc_info->vfactor;
        pitch[1] = obj_image->image.pitches[U];
        offset[1] = obj_image->image.offsets[U];

        width[2] = obj_image->image.width / fourcc_info->hfactor;
        height[2] = obj_image->image.height / fourcc_info->vfactor;
        pitch[2] = obj_image->image.pitches[V];
        offset[2] = obj_image->image.offsets[V];
    }

    if (is_target) {
        /* Destination planes are written as raw integer views; the width is
         * divided because each R8_UINT "pixel" covers 4 bytes here. */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  width[0] / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index, 1);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      width[1] / 2, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8G8_SINT,
                                      base_index + 1, 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[1],
                                      width[1] / 4, height[1], pitch[1],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 1, 1);
            gen8_pp_set_surface_state(ctx, pp_context,
                                      bo, offset[2],
                                      width[2] / 4, height[2], pitch[2],
                                      I965_SURFACEFORMAT_R8_SINT,
                                      base_index + 2, 1);
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* the format is MSB: X-B-G-R */
            pp_static_parameter->grf2.save_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                /* It is stored as MSB: X-R-G-B */
                pp_static_parameter->grf2.save_avs_rgb_swap = 1;
            }
        }
    } else {
        /* Source path: pick the sampler surface format for plane 0. */
        int format0 = SURFACE_FORMAT_Y8_UNORM;

        switch (fourcc) {
        case VA_FOURCC_YUY2:
            format0 = SURFACE_FORMAT_YCRCB_NORMAL;
            break;

        case VA_FOURCC_UYVY:
            format0 = SURFACE_FORMAT_YCRCB_SWAPY;
            break;

        default:
            break;
        }

        if (fourcc_info->format == I965_COLOR_RGB) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
            format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
            pp_static_parameter->grf2.src_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                pp_static_parameter->grf2.src_avs_rgb_swap = 1;
            }
        }

        gen8_pp_set_surface2_state(ctx, pp_context,
                                   bo, offset[0],
                                   width[0], height[0], pitch[0],
                                   0, 0,
                                   format0, 0,
                                   base_index);

        if (fourcc_info->num_planes == 2) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8B8_UNORM, 0,
                                       base_index + 1);
        } else if (fourcc_info->num_planes == 3) {
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[1],
                                       width[1], height[1], pitch[1],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 1);
            gen8_pp_set_surface2_state(ctx, pp_context,
                                       bo, offset[2],
                                       width[2], height[2], pitch[2],
                                       0, 0,
                                       SURFACE_FORMAT_R8_UNORM, 0,
                                       base_index + 2);
        }
    }
}
621
/* Null filter: a single horizontal step regardless of context. */
static int
pp_null_x_steps(void *private_context)
{
    (void)private_context; /* unused */
    return 1;
}
627
/* Null filter: a single vertical step regardless of context. */
static int
pp_null_y_steps(void *private_context)
{
    (void)private_context; /* unused */
    return 1;
}
633
/* Null filter: nothing to program per block; always succeeds. */
static int
pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
{
    (void)pp_context;
    (void)x;
    (void)y;
    return 0;
}
639
640 static VAStatus
641 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
642                    const struct i965_surface *src_surface,
643                    const VARectangle *src_rect,
644                    struct i965_surface *dst_surface,
645                    const VARectangle *dst_rect,
646                    void *filter_param)
647 {
648     /* private function & data */
649     pp_context->pp_x_steps = pp_null_x_steps;
650     pp_context->pp_y_steps = pp_null_y_steps;
651     pp_context->private_context = NULL;
652     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
653
654     dst_surface->flags = src_surface->flags;
655
656     return VA_STATUS_SUCCESS;
657 }
658
659 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
660 {
661     int i, dst_width_adjust;
662     /* x offset of dest surface must be dword aligned.
663      * so we have to extend dst surface on left edge, and mask out pixels not interested
664      */
665     if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
666         pp_context->block_horizontal_mask_left = 0;
667         for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
668         {
669             pp_context->block_horizontal_mask_left |= 1<<i;
670         }
671     }
672     else {
673         pp_context->block_horizontal_mask_left = 0xffff;
674     }
675
676     dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
677     if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
678         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
679     }
680     else {
681         pp_context->block_horizontal_mask_right = 0xffff;
682     }
683
684     if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
685         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
686     }
687     else {
688         pp_context->block_vertical_mask_bottom = 0xff;
689     }
690
691 }
692
693 static int
694 gen7_pp_avs_x_steps(void *private_context)
695 {
696     struct pp_avs_context *pp_avs_context = private_context;
697
698     return pp_avs_context->dest_w / 16;
699 }
700
701 static int
702 gen7_pp_avs_y_steps(void *private_context)
703 {
704     struct pp_avs_context *pp_avs_context = private_context;
705
706     return pp_avs_context->dest_h / 16;
707 }
708
709 static int
710 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
711 {
712     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
713     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
714
715     pp_inline_parameter->grf7.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
716     pp_inline_parameter->grf7.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
717     pp_inline_parameter->grf7.constant_0 = 0xffffffff;
718     pp_inline_parameter->grf7.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
719
720     return 0;
721 }
722
723 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
724                                               struct i965_post_processing_context *pp_context,
725                                               const struct i965_surface *surface)
726 {
727     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
728     int fourcc = pp_get_surface_fourcc(ctx, surface);
729
730     if (fourcc == VA_FOURCC_YUY2) {
731         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
732         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
733         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
734     } else if (fourcc == VA_FOURCC_UYVY) {
735         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
736         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
737         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
738     }
739 }
740
741 static VAStatus
742 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
743                            const struct i965_surface *src_surface,
744                            const VARectangle *src_rect,
745                            struct i965_surface *dst_surface,
746                            const VARectangle *dst_rect,
747                            void *filter_param)
748 {
749 /* TODO: Add the sampler_8x8 state */
750     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
751     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
752     struct gen8_sampler_8x8_avs *sampler_8x8;
753     struct i965_sampler_8x8_coefficient *sampler_8x8_state;
754     int i;
755     int width[3], height[3], pitch[3], offset[3];
756     int src_width, src_height;
757     unsigned char *cc_ptr;
758
759     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
760
761     /* source surface */
762     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
763                                          width, height, pitch, offset);
764     src_height = height[0];
765     src_width  = width[0];
766
767     /* destination surface */
768     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
769                                          width, height, pitch, offset);
770
771     /* sampler 8x8 state */
772     dri_bo_map(pp_context->dynamic_state.bo, True);
773     assert(pp_context->dynamic_state.bo->virtual);
774
775     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
776                         pp_context->sampler_offset;
777     /* Currently only one gen8 sampler_8x8 is initialized */
778     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
779     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
780
781     sampler_8x8->dw0.gain_factor = 44;
782     sampler_8x8->dw0.weak_edge_threshold = 1;
783     sampler_8x8->dw0.strong_edge_threshold = 8;
784     /* Use the value like that on Ivy instead of default
785      * sampler_8x8->dw0.r3x_coefficient = 5;
786      */
787     sampler_8x8->dw0.r3x_coefficient = 27;
788     sampler_8x8->dw0.r3c_coefficient = 5;
789
790     sampler_8x8->dw2.global_noise_estimation = 255;
791     sampler_8x8->dw2.non_edge_weight = 1;
792     sampler_8x8->dw2.regular_weight = 2;
793     sampler_8x8->dw2.strong_edge_weight = 7;
794     /* Use the value like that on Ivy instead of default
795      * sampler_8x8->dw2.r5x_coefficient = 7;
796      * sampler_8x8->dw2.r5cx_coefficient = 7;
797      * sampler_8x8->dw2.r5c_coefficient = 7;
798      */
799     sampler_8x8->dw2.r5x_coefficient = 9;
800     sampler_8x8->dw2.r5cx_coefficient = 8;
801     sampler_8x8->dw2.r5c_coefficient = 3;
802
803     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
804     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
805     sampler_8x8->dw3.sat_max = 0x1f;
806     sampler_8x8->dw3.hue_max = 14;
807     /* The 8tap filter will determine whether the adaptive Filter is
808      * applied for all channels(dw153).
809      * If the 8tap filter is disabled, the adaptive filter should be disabled.
810      * Only when 8tap filter is enabled, it can be enabled or not.
811      */
812     sampler_8x8->dw3.enable_8tap_filter = 3;
813     sampler_8x8->dw3.ief4_smooth_enable = 0;
814
815     sampler_8x8->dw4.s3u = 0;
816     sampler_8x8->dw4.diamond_margin = 4;
817     sampler_8x8->dw4.vy_std_enable = 0;
818     sampler_8x8->dw4.umid = 110;
819     sampler_8x8->dw4.vmid = 154;
820
821     sampler_8x8->dw5.diamond_dv = 0;
822     sampler_8x8->dw5.diamond_th = 35;
823     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
824     sampler_8x8->dw5.hs_margin = 3;
825     sampler_8x8->dw5.diamond_du = 2;
826
827     sampler_8x8->dw6.y_point1 = 46;
828     sampler_8x8->dw6.y_point2 = 47;
829     sampler_8x8->dw6.y_point3 = 254;
830     sampler_8x8->dw6.y_point4 = 255;
831
832     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
833
834     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
835     sampler_8x8->dw8.p0l = 46;
836     sampler_8x8->dw8.p1l = 216;
837
838     sampler_8x8->dw9.p2l = 236;
839     sampler_8x8->dw9.p3l = 236;
840     sampler_8x8->dw9.b0l = 133;
841     sampler_8x8->dw9.b1l = 130;
842
843     sampler_8x8->dw10.b2l = 130;
844     sampler_8x8->dw10.b3l = 130;
845     /* s0l = -5 / 256. s2.8 */
846     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
847     sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */
848
849     sampler_8x8->dw11.s1l = 0;
850     sampler_8x8->dw11.s2l = 0;
851
852     sampler_8x8->dw12.s3l = 0;
853     sampler_8x8->dw12.p0u = 46;
854     sampler_8x8->dw12.p1u = 66;
855     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
856
857     sampler_8x8->dw13.p2u = 130;
858     sampler_8x8->dw13.p3u = 236;
859     sampler_8x8->dw13.b0u = 143;
860     sampler_8x8->dw13.b1u = 163;
861
862     sampler_8x8->dw14.b2u = 200;
863     sampler_8x8->dw14.b3u = 140;
864     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
865
866     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
867     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
868
869     sampler_8x8_state = sampler_8x8->coefficients;
870
871     for (i = 0; i < 17; i++) {
872         float coff;
873         coff = i;
874         coff = coff / 16;
875
876         memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
877         /* for Y channel, currently ignore */
878         sampler_8x8_state->dw0.table_0x_filter_c0 = 0x0;
879         sampler_8x8_state->dw0.table_0x_filter_c1 = 0x0;
880         sampler_8x8_state->dw0.table_0x_filter_c2 = 0x0;
881         sampler_8x8_state->dw0.table_0x_filter_c3 =
882                                 intel_format_convert(1 - coff, 1, 6, 0);
883         sampler_8x8_state->dw1.table_0x_filter_c4 =
884                                 intel_format_convert(coff, 1, 6, 0);
885         sampler_8x8_state->dw1.table_0x_filter_c5 = 0x0;
886         sampler_8x8_state->dw1.table_0x_filter_c6 = 0x0;
887         sampler_8x8_state->dw1.table_0x_filter_c7 = 0x0;
888         sampler_8x8_state->dw2.table_0y_filter_c0 = 0x0;
889         sampler_8x8_state->dw2.table_0y_filter_c1 = 0x0;
890         sampler_8x8_state->dw2.table_0y_filter_c2 = 0x0;
891         sampler_8x8_state->dw2.table_0y_filter_c3 =
892                                 intel_format_convert(1 - coff, 1, 6, 0);
893         sampler_8x8_state->dw3.table_0y_filter_c4 =
894                                 intel_format_convert(coff, 1, 6, 0);
895         sampler_8x8_state->dw3.table_0y_filter_c5 = 0x0;
896         sampler_8x8_state->dw3.table_0y_filter_c6 = 0x0;
897         sampler_8x8_state->dw3.table_0y_filter_c7 = 0x0;
898         /* for U/V channel, 0.25 */
899         sampler_8x8_state->dw4.table_1x_filter_c0 = 0x0;
900         sampler_8x8_state->dw4.table_1x_filter_c1 = 0x0;
901         sampler_8x8_state->dw4.table_1x_filter_c2 = 0x0;
902         sampler_8x8_state->dw4.table_1x_filter_c3 =
903                                 intel_format_convert(1 - coff, 1, 6, 0);
904         sampler_8x8_state->dw5.table_1x_filter_c4 =
905                                 intel_format_convert(coff, 1, 6, 0);
906         sampler_8x8_state->dw5.table_1x_filter_c5 = 0x00;
907         sampler_8x8_state->dw5.table_1x_filter_c6 = 0x0;
908         sampler_8x8_state->dw5.table_1x_filter_c7 = 0x0;
909         sampler_8x8_state->dw6.table_1y_filter_c0 = 0x0;
910         sampler_8x8_state->dw6.table_1y_filter_c1 = 0x0;
911         sampler_8x8_state->dw6.table_1y_filter_c2 = 0x0;
912         sampler_8x8_state->dw6.table_1y_filter_c3 =
913                                 intel_format_convert(1 - coff, 1, 6, 0);
914         sampler_8x8_state->dw7.table_1y_filter_c4 =
915                                 intel_format_convert(coff, 1, 6,0);
916         sampler_8x8_state->dw7.table_1y_filter_c5 = 0x0;
917         sampler_8x8_state->dw7.table_1y_filter_c6 = 0x0;
918         sampler_8x8_state->dw7.table_1y_filter_c7 = 0x0;
919         sampler_8x8_state++;
920     }
921
922     sampler_8x8->dw152.default_sharpness_level = 0;
923     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
924     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
925     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
926
927     dri_bo_unmap(pp_context->dynamic_state.bo);
928
929
930     /* private function & data */
931     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
932     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
933     pp_context->private_context = &pp_context->pp_avs_context;
934     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
935
936     pp_avs_context->dest_x = dst_rect->x;
937     pp_avs_context->dest_y = dst_rect->y;
938     pp_avs_context->dest_w = ALIGN(dst_rect->width, 16);
939     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
940     pp_avs_context->src_w = src_rect->width;
941     pp_avs_context->src_h = src_rect->height;
942     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
943
944     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
945     dw = MAX(dw, dst_rect->width);
946
947     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
948     pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
949     pp_static_parameter->grf2.avs_wa_width = src_width;
950     pp_static_parameter->grf2.avs_wa_one_div_256_width = (float) 1.0 / (256 * src_width);
951     pp_static_parameter->grf2.avs_wa_five_div_256_width = (float) 5.0 / (256 * src_width);
952     pp_static_parameter->grf2.alpha = 255;
953
954     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
955     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
956     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
957         (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
958     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
959         (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
960
961     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
962
963     dst_surface->flags = src_surface->flags;
964
965     return VA_STATUS_SUCCESS;
966 }
967
968 static VAStatus
969 gen8_pp_initialize(
970     VADriverContextP   ctx,
971     struct i965_post_processing_context *pp_context,
972     const struct i965_surface *src_surface,
973     const VARectangle *src_rect,
974     struct i965_surface *dst_surface,
975     const VARectangle *dst_rect,
976     int                pp_index,
977     void * filter_param
978 )
979 {
980     VAStatus va_status;
981     struct i965_driver_data *i965 = i965_driver_data(ctx);
982     dri_bo *bo;
983     int bo_size;
984     unsigned int end_offset;
985     struct pp_module *pp_module;
986     int static_param_size, inline_param_size;
987
988     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
989     bo = dri_bo_alloc(i965->intel.bufmgr,
990                       "surface state & binding table",
991                       (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
992                       4096);
993     assert(bo);
994     pp_context->surface_state_binding_table.bo = bo;
995
996     pp_context->idrt.num_interface_descriptors = 0;
997
998     pp_context->sampler_size = 2 * 4096;
999
1000     bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
1001                 + pp_context->idrt_size;
1002
1003     dri_bo_unreference(pp_context->dynamic_state.bo);
1004     bo = dri_bo_alloc(i965->intel.bufmgr,
1005                       "dynamic_state",
1006                       bo_size,
1007                       4096);
1008
1009     assert(bo);
1010     pp_context->dynamic_state.bo = bo;
1011     pp_context->dynamic_state.bo_size = bo_size;
1012
1013     end_offset = 0;
1014     pp_context->dynamic_state.end_offset = 0;
1015
1016     /* Constant buffer offset */
1017     pp_context->curbe_offset = ALIGN(end_offset, 64);
1018     end_offset = pp_context->curbe_offset + pp_context->curbe_size;
1019
1020     /* Interface descriptor offset */
1021     pp_context->idrt_offset = ALIGN(end_offset, 64);
1022     end_offset = pp_context->idrt_offset + pp_context->idrt_size;
1023
1024     /* Sampler state offset */
1025     pp_context->sampler_offset = ALIGN(end_offset, 64);
1026     end_offset = pp_context->sampler_offset + pp_context->sampler_size;
1027
1028     /* update the end offset of dynamic_state */
1029     pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);
1030
1031     static_param_size = sizeof(struct gen7_pp_static_parameter);
1032     inline_param_size = sizeof(struct gen7_pp_inline_parameter);
1033
1034     memset(pp_context->pp_static_parameter, 0, static_param_size);
1035     memset(pp_context->pp_inline_parameter, 0, inline_param_size);
1036
1037     assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
1038     pp_context->current_pp = pp_index;
1039     pp_module = &pp_context->pp_modules[pp_index];
1040
1041     if (pp_module->initialize)
1042         va_status = pp_module->initialize(ctx, pp_context,
1043                                           src_surface,
1044                                           src_rect,
1045                                           dst_surface,
1046                                           dst_rect,
1047                                           filter_param);
1048     else
1049         va_status = VA_STATUS_ERROR_UNIMPLEMENTED;
1050
1051     calculate_boundary_block_mask(pp_context, dst_rect);
1052
1053     return va_status;
1054 }
1055
static void
gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
                                   struct i965_post_processing_context *pp_context)
{
    struct gen8_interface_descriptor_data *desc;
    dri_bo *bo;
    int pp_index = pp_context->current_pp;
    unsigned char *cc_ptr;

    /* Append one interface descriptor for the current PP kernel into the
     * IDRT region of the dynamic-state buffer. */
    bo = pp_context->dynamic_state.bo;

    dri_bo_map(bo, 1);
    assert(bo->virtual);
    cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;

    /* Slot for the next descriptor (one entry per call) */
    desc = (struct gen8_interface_descriptor_data *) cc_ptr +
                pp_context->idrt.num_interface_descriptors;

    memset(desc, 0, sizeof(*desc));
    /* Kernel start pointer is a 64-byte-aligned offset into the
     * instruction buffer */
    desc->desc0.kernel_start_pointer =
                pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
    desc->desc2.single_program_flow = 1;
    desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
    desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
    /* Sampler state and binding table pointers are 32-byte-aligned offsets */
    desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
    desc->desc4.binding_table_entry_count = 0;
    desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
    desc->desc5.constant_urb_entry_read_offset = 0;

    desc->desc5.constant_urb_entry_read_length = 6; /* grf 1-6 */

    dri_bo_unmap(bo);
    pp_context->idrt.num_interface_descriptors++;
}
1090
1091
1092 static void
1093 gen8_pp_upload_constants(VADriverContextP ctx,
1094                          struct i965_post_processing_context *pp_context)
1095 {
1096     unsigned char *constant_buffer;
1097     int param_size;
1098
1099     assert(sizeof(struct gen7_pp_static_parameter) == 192);
1100
1101     param_size = sizeof(struct gen7_pp_static_parameter);
1102
1103     dri_bo_map(pp_context->dynamic_state.bo, 1);
1104     assert(pp_context->dynamic_state.bo->virtual);
1105     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1106                         pp_context->curbe_offset;
1107
1108     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1109     dri_bo_unmap(pp_context->dynamic_state.bo);
1110     return;
1111 }
1112
1113 static void
1114 gen8_pp_states_setup(VADriverContextP ctx,
1115                      struct i965_post_processing_context *pp_context)
1116 {
1117     gen8_pp_interface_descriptor_table(ctx, pp_context);
1118     gen8_pp_upload_constants(ctx, pp_context);
1119 }
1120
static void
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    /* Switch the GPU command streamer to the media pipeline */
    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
1131
1132 static void
1133 gen8_pp_state_base_address(VADriverContextP ctx,
1134                            struct i965_post_processing_context *pp_context)
1135 {
1136     struct intel_batchbuffer *batch = pp_context->batch;
1137
1138     BEGIN_BATCH(batch, 16);
1139     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1140         /* DW1 Generate state address */
1141     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1142         OUT_BATCH(batch, 0);
1143         OUT_BATCH(batch, 0);
1144         /* DW4. Surface state address */
1145     OUT_RELOC(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1146         OUT_BATCH(batch, 0);
1147         /* DW6. Dynamic state address */
1148     OUT_RELOC(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1149                 0, 0 | BASE_ADDRESS_MODIFY);
1150         OUT_BATCH(batch, 0);
1151
1152         /* DW8. Indirect object address */
1153     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1154         OUT_BATCH(batch, 0);
1155
1156         /* DW10. Instruction base address */
1157     OUT_RELOC(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
1158         OUT_BATCH(batch, 0);
1159
1160     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1161     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1162     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1163     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1164     ADVANCE_BATCH(batch);
1165 }
1166
static void
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    /* Program MEDIA_VFE_STATE from the values cached in
     * pp_context->vfe_gpu_state (thread count, URB layout, CURBE size). */
    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* Maximum thread count is encoded as N-1 */
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
                /* URB Entry Allocation Size, in 256 bits unit */
              (pp_context->vfe_gpu_state.curbe_allocation_size));
                /* CURBE Allocation Size, in 256 bits unit */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}
1191
static void
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    /* Point the hardware at the interface descriptors written by
     * gen8_pp_interface_descriptor_table(). A MEDIA_STATE_FLUSH is
     * emitted first, in the same batch section. */
    BEGIN_BATCH(batch, 6);

    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    /* Total length in bytes of all descriptors to load */
    OUT_BATCH(batch,
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    /* Offset of the IDRT relative to the dynamic state base address */
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
}
1210
1211 static void
1212 gen8_pp_curbe_load(VADriverContextP ctx,
1213                    struct i965_post_processing_context *pp_context)
1214 {
1215     struct intel_batchbuffer *batch = pp_context->batch;
1216     struct i965_driver_data *i965 = i965_driver_data(ctx);
1217     int param_size = 64;
1218
1219     param_size = sizeof(struct gen7_pp_static_parameter);
1220
1221     BEGIN_BATCH(batch, 4);
1222     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1223     OUT_BATCH(batch, 0);
1224     OUT_BATCH(batch,
1225               param_size);
1226     OUT_BATCH(batch, pp_context->curbe_offset);
1227     ADVANCE_BATCH(batch);
1228 }
1229
static void
gen8_pp_object_walker(VADriverContextP ctx,
                      struct i965_post_processing_context *pp_context)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct intel_batchbuffer *batch = pp_context->batch;
    int x, x_steps, y, y_steps;
    int param_size, command_length_in_dws, extra_cmd_in_dws;
    dri_bo *command_buffer;
    unsigned int *command_ptr;

    /* Build a second-level batch buffer containing one MEDIA_OBJECT
     * (plus inline parameters) per block, then chain to it from the
     * main batch with MI_BATCH_BUFFER_START. */
    param_size = sizeof(struct gen7_pp_inline_parameter);

    x_steps = pp_context->pp_x_steps(pp_context->private_context);
    y_steps = pp_context->pp_y_steps(pp_context->private_context);
    /* 6 fixed MEDIA_OBJECT header DWs + inline parameter payload */
    command_length_in_dws = 6 + (param_size >> 2);
    extra_cmd_in_dws = 2; /* MEDIA_STATE_FLUSH after each object */
    command_buffer = dri_bo_alloc(i965->intel.bufmgr,
                                  "command objects buffer",
                                  (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
                                  4096);

    dri_bo_map(command_buffer, 1);
    command_ptr = command_buffer->virtual;

    for (y = 0; y < y_steps; y++) {
        for (x = 0; x < x_steps; x++) {
            /* pp_set_block_parameter() fills the inline parameters for
             * block (x, y); a non-zero return skips the block */
            if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {

                *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                *command_ptr++ = 0;
                memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
                command_ptr += (param_size >> 2);

                *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
                *command_ptr++ = 0;
            }
        }
    }

    /* Pad so MI_BATCH_BUFFER_END lands on a QWord boundary.
     * NOTE(review): the parity is computed from the theoretical maximum
     * command count; it presumably assumes skipped blocks don't change
     * the alignment outcome — confirm against pp_set_block_parameter. */
    if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
        *command_ptr++ = 0;

    *command_ptr++ = MI_BATCH_BUFFER_END;
    *command_ptr++ = 0;

    dri_bo_unmap(command_buffer);

    /* Chain into the second-level batch (bit 0: second-level flag) */
    BEGIN_BATCH(batch, 3);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
    OUT_RELOC(batch, command_buffer,
              I915_GEM_DOMAIN_COMMAND, 0, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);

    dri_bo_unreference(command_buffer);

    /* Have to execute the batch buffer here becuase MI_BATCH_BUFFER_END
     * will cause control to pass back to ring buffer
     */
    intel_batchbuffer_end_atomic(batch);
    intel_batchbuffer_flush(batch);
    intel_batchbuffer_start_atomic(batch, 0x1000);
}
1298
static void
gen8_pp_pipeline_setup(VADriverContextP ctx,
                       struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    /* Emit the full media pipeline for one PP pass: pipeline select,
     * base addresses, VFE, CURBE, interface descriptors, then walk the
     * destination blocks. */
    intel_batchbuffer_start_atomic(batch, 0x1000);
    intel_batchbuffer_emit_mi_flush(batch);
    gen6_pp_pipeline_select(ctx, pp_context);
    gen8_pp_state_base_address(ctx, pp_context);
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_curbe_load(ctx, pp_context);
    gen8_interface_descriptor_load(ctx, pp_context);
    /* NOTE(review): VFE state is programmed a second time here with
     * identical values; looks redundant — confirm whether this is a
     * required hardware workaround before removing. */
    gen8_pp_vfe_state(ctx, pp_context);
    gen8_pp_object_walker(ctx, pp_context);
    intel_batchbuffer_end_atomic(batch);
}
1316
1317 static VAStatus
1318 gen8_post_processing(
1319     VADriverContextP   ctx,
1320     struct i965_post_processing_context *pp_context,
1321     const struct i965_surface *src_surface,
1322     const VARectangle *src_rect,
1323     struct i965_surface *dst_surface,
1324     const VARectangle *dst_rect,
1325     int                pp_index,
1326     void * filter_param
1327 )
1328 {
1329     VAStatus va_status;
1330
1331     va_status = gen8_pp_initialize(ctx, pp_context,
1332                                    src_surface,
1333                                    src_rect,
1334                                    dst_surface,
1335                                    dst_rect,
1336                                    pp_index,
1337                                    filter_param);
1338
1339     if (va_status == VA_STATUS_SUCCESS) {
1340         gen8_pp_states_setup(ctx, pp_context);
1341         gen8_pp_pipeline_setup(ctx, pp_context);
1342     }
1343
1344     return va_status;
1345 }
1346
1347 static void
1348 gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
1349 {
1350     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1351     pp_context->surface_state_binding_table.bo = NULL;
1352
1353     dri_bo_unreference(pp_context->pp_dndi_context.stmm_bo);
1354     pp_context->pp_dndi_context.stmm_bo = NULL;
1355
1356     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1357     pp_context->pp_dn_context.stmm_bo = NULL;
1358
1359     if (pp_context->instruction_state.bo) {
1360         dri_bo_unreference(pp_context->instruction_state.bo);
1361         pp_context->instruction_state.bo = NULL;
1362     }
1363
1364     if (pp_context->indirect_state.bo) {
1365         dri_bo_unreference(pp_context->indirect_state.bo);
1366         pp_context->indirect_state.bo = NULL;
1367     }
1368
1369     if (pp_context->dynamic_state.bo) {
1370         dri_bo_unreference(pp_context->dynamic_state.bo);
1371         pp_context->dynamic_state.bo = NULL;
1372     }
1373
1374     free(pp_context->pp_static_parameter);
1375     free(pp_context->pp_inline_parameter);
1376     pp_context->pp_static_parameter = NULL;
1377     pp_context->pp_inline_parameter = NULL;
1378 }
1379
1380 #define VPP_CURBE_ALLOCATION_SIZE       32
1381
1382 void
1383 gen8_post_processing_context_init(VADriverContextP ctx,
1384                                   void *data,
1385                                   struct intel_batchbuffer *batch)
1386 {
1387     struct i965_driver_data *i965 = i965_driver_data(ctx);
1388     int i, kernel_size;
1389     unsigned int kernel_offset, end_offset;
1390     unsigned char *kernel_ptr;
1391     struct pp_module *pp_module;
1392     struct i965_post_processing_context *pp_context = data;
1393
1394     {
1395         pp_context->vfe_gpu_state.max_num_threads = 60;
1396         pp_context->vfe_gpu_state.num_urb_entries = 59;
1397         pp_context->vfe_gpu_state.gpgpu_mode = 0;
1398         pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1399         pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1400     }
1401
1402     pp_context->intel_post_processing = gen8_post_processing;
1403     pp_context->finalize = gen8_post_processing_context_finalize;
1404
1405     assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));
1406
1407     memcpy(pp_context->pp_modules, pp_modules_gen8, sizeof(pp_context->pp_modules));
1408
1409     kernel_size = 4096 ;
1410
1411     for (i = 0; i < NUM_PP_MODULES; i++) {
1412         pp_module = &pp_context->pp_modules[i];
1413
1414         if (pp_module->kernel.bin && pp_module->kernel.size) {
1415             kernel_size += pp_module->kernel.size;
1416         }
1417     }
1418
1419     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1420                                   "kernel shader",
1421                                   kernel_size,
1422                                   0x1000);
1423     if (pp_context->instruction_state.bo == NULL) {
1424         WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
1425         return;
1426     }
1427
1428     assert(pp_context->instruction_state.bo);
1429
1430
1431     pp_context->instruction_state.bo_size = kernel_size;
1432     pp_context->instruction_state.end_offset = 0;
1433     end_offset = 0;
1434
1435     dri_bo_map(pp_context->instruction_state.bo, 1);
1436     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1437
1438     for (i = 0; i < NUM_PP_MODULES; i++) {
1439         pp_module = &pp_context->pp_modules[i];
1440
1441         kernel_offset = ALIGN(end_offset, 64);
1442         pp_module->kernel.kernel_offset = kernel_offset;
1443
1444         if (pp_module->kernel.bin && pp_module->kernel.size) {
1445
1446             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1447             end_offset = kernel_offset + pp_module->kernel.size;
1448         }
1449     }
1450
1451     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1452
1453     dri_bo_unmap(pp_context->instruction_state.bo);
1454
1455     /* static & inline parameters */
1456     pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1457     pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1458
1459     pp_context->pp_dndi_context.current_out_surface = VA_INVALID_SURFACE;
1460     pp_context->pp_dndi_context.current_out_obj_surface = NULL;
1461     pp_context->pp_dndi_context.frame_order = -1;
1462     pp_context->batch = batch;
1463
1464     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1465     pp_context->curbe_size = 256;
1466 }