post_processing_context_init()/finalize() callback functions for each platform
[platform/upstream/libva-intel-driver.git] / src / gen8_post_processing.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the
6  * "Software"), to deal in the Software without restriction, including
7  * without limitation the rights to use, copy, modify, merge, publish,
8  * distribute, sub license, and/or sell copies of the Software, and to
9  * permit persons to whom the Software is furnished to do so, subject to
10  * the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the
13  * next paragraph) shall be included in all copies or substantial portions
14  * of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19  * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Xiang Haihao <haihao.xiang@intel.com>
26  *    Zhao Yakui <yakui.zhao@intel.com>
27  *
28  */
29
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <assert.h>
34
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_post_processing.h"
41 #include "i965_render.h"
42 #include "intel_media.h"
43
/* True when the device generation provides the video post-processing pipeline. */
#define HAS_PP(ctx) (IS_IRONLAKE((ctx)->intel.device_info) ||     \
                     IS_GEN6((ctx)->intel.device_info) ||         \
                     IS_GEN7((ctx)->intel.device_info) ||         \
                     IS_GEN8((ctx)->intel.device_info))


/* This file targets gen8 only, so pad every surface-state entry to the gen8 size. */
#define SURFACE_STATE_PADDED_SIZE               SURFACE_STATE_PADDED_SIZE_GEN8

/* Byte offset of surface-state entry 'index' inside the combined
 * surface-state/binding-table buffer; the binding table itself starts
 * right after the last surface-state slot. */
#define SURFACE_STATE_OFFSET(index)             (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET                    SURFACE_STATE_OFFSET(MAX_PP_SURFACES)

/* Geometry of one block processed by the media kernels; used when
 * computing the boundary block masks below. */
#define GPU_ASM_BLOCK_WIDTH         16
#define GPU_ASM_BLOCK_HEIGHT        8
#define GPU_ASM_X_OFFSET_ALIGNMENT  4

/* NOTE(review): despite the name this is not a real VA_STATUS_SUCCESS code;
 * it looks like a private in-driver sentinel value -- confirm before reuse. */
#define VA_STATUS_SUCCESS_1                     0xFFFFFFFE

/* No-op initializer installed for module slots that have no gen8 kernel. */
static VAStatus pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                   const struct i965_surface *src_surface,
                                   const VARectangle *src_rect,
                                   struct i965_surface *dst_surface,
                                   const VARectangle *dst_rect,
                                   void *filter_param);

/* Shared initializer for all load/save/scaling (AVS) modules. */
static VAStatus gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                           const struct i965_surface *src_surface,
                                           const VARectangle *src_rect,
                                           struct i965_surface *dst_surface,
                                           const VARectangle *dst_rect,
                                           void *filter_param);
/* Kernel binaries for each post-processing module, one table per
 * source/destination format pair.
 * TODO: Modify the shaders and then compile them again.
 * Currently they are derived from Haswell. */
static const uint32_t pp_null_gen8[][4] = {
};

static const uint32_t pp_nv12_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl3.g8b"
};

static const uint32_t pp_pl3_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl2.g8b"
};

static const uint32_t pp_pl3_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pl3.g8b"
};

/* Scaling and AVS reuse the same pl2_to_pl2 kernel as plain load/save. */
static const uint32_t pp_nv12_scaling_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

static const uint32_t pp_nv12_avs_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pl2.g8b"
};

/* No gen8 DNDI/DN kernels yet: the tables are left empty and the
 * corresponding modules use pp_null_initialize. */
static const uint32_t pp_nv12_dndi_gen8[][4] = {
// #include "shaders/post_processing/gen7/dndi.g75b"
};

static const uint32_t pp_nv12_dn_gen8[][4] = {
// #include "shaders/post_processing/gen7/nv12_dn_nv12.g75b"
};
static const uint32_t pp_nv12_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_pa.g8b"
};
static const uint32_t pp_pl3_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pl3_to_pa.g8b"
};
static const uint32_t pp_pa_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl2.g8b"
};
static const uint32_t pp_pa_load_save_pl3_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pl3.g8b"
};
static const uint32_t pp_pa_load_save_pa_gen8[][4] = {
#include "shaders/post_processing/gen8/pa_to_pa.g8b"
};
static const uint32_t pp_rgbx_load_save_nv12_gen8[][4] = {
#include "shaders/post_processing/gen8/rgbx_to_nv12.g8b"
};
static const uint32_t pp_nv12_load_save_rgbx_gen8[][4] = {
#include "shaders/post_processing/gen8/pl2_to_rgbx.g8b"
};
132
133 static struct pp_module pp_modules_gen8[] = {
134     {
135         {
136             "NULL module (for testing)",
137             PP_NULL,
138             pp_null_gen8,
139             sizeof(pp_null_gen8),
140             NULL,
141         },
142
143         pp_null_initialize,
144     },
145
146     {
147         {
148             "NV12_NV12",
149             PP_NV12_LOAD_SAVE_N12,
150             pp_nv12_load_save_nv12_gen8,
151             sizeof(pp_nv12_load_save_nv12_gen8),
152             NULL,
153         },
154
155         gen8_pp_plx_avs_initialize,
156     },
157
158     {
159         {
160             "NV12_PL3",
161             PP_NV12_LOAD_SAVE_PL3,
162             pp_nv12_load_save_pl3_gen8,
163             sizeof(pp_nv12_load_save_pl3_gen8),
164             NULL,
165         },
166         gen8_pp_plx_avs_initialize,
167     },
168
169     {
170         {
171             "PL3_NV12",
172             PP_PL3_LOAD_SAVE_N12,
173             pp_pl3_load_save_nv12_gen8,
174             sizeof(pp_pl3_load_save_nv12_gen8),
175             NULL,
176         },
177
178         gen8_pp_plx_avs_initialize,
179     },
180
181     {
182         {
183             "PL3_PL3",
184             PP_PL3_LOAD_SAVE_N12,
185             pp_pl3_load_save_pl3_gen8,
186             sizeof(pp_pl3_load_save_pl3_gen8),
187             NULL,
188         },
189
190         gen8_pp_plx_avs_initialize,
191     },
192
193     {
194         {
195             "NV12 Scaling module",
196             PP_NV12_SCALING,
197             pp_nv12_scaling_gen8,
198             sizeof(pp_nv12_scaling_gen8),
199             NULL,
200         },
201
202         gen8_pp_plx_avs_initialize,
203     },
204
205     {
206         {
207             "NV12 AVS module",
208             PP_NV12_AVS,
209             pp_nv12_avs_gen8,
210             sizeof(pp_nv12_avs_gen8),
211             NULL,
212         },
213
214         gen8_pp_plx_avs_initialize,
215     },
216
217     {
218         {
219             "NV12 DNDI module",
220             PP_NV12_DNDI,
221             pp_nv12_dndi_gen8,
222             sizeof(pp_nv12_dndi_gen8),
223             NULL,
224         },
225
226         pp_null_initialize,
227     },
228
229     {
230         {
231             "NV12 DN module",
232             PP_NV12_DN,
233             pp_nv12_dn_gen8,
234             sizeof(pp_nv12_dn_gen8),
235             NULL,
236         },
237
238         pp_null_initialize,
239     },
240     {
241         {
242             "NV12_PA module",
243             PP_NV12_LOAD_SAVE_PA,
244             pp_nv12_load_save_pa_gen8,
245             sizeof(pp_nv12_load_save_pa_gen8),
246             NULL,
247         },
248
249         gen8_pp_plx_avs_initialize,
250     },
251
252     {
253         {
254             "PL3_PA module",
255             PP_PL3_LOAD_SAVE_PA,
256             pp_pl3_load_save_pa_gen8,
257             sizeof(pp_pl3_load_save_pa_gen8),
258             NULL,
259         },
260
261         gen8_pp_plx_avs_initialize,
262     },
263
264     {
265         {
266             "PA_NV12 module",
267             PP_PA_LOAD_SAVE_NV12,
268             pp_pa_load_save_nv12_gen8,
269             sizeof(pp_pa_load_save_nv12_gen8),
270             NULL,
271         },
272
273         gen8_pp_plx_avs_initialize,
274     },
275
276     {
277         {
278             "PA_PL3 module",
279             PP_PA_LOAD_SAVE_PL3,
280             pp_pa_load_save_pl3_gen8,
281             sizeof(pp_pa_load_save_pl3_gen8),
282             NULL,
283         },
284
285         gen8_pp_plx_avs_initialize,
286     },
287
288     {
289         {
290             "PA_PA module",
291             PP_PA_LOAD_SAVE_PA,
292             pp_pa_load_save_pa_gen8,
293             sizeof(pp_pa_load_save_pa_gen8),
294             NULL,
295         },
296
297         gen8_pp_plx_avs_initialize,
298     },
299
300     {
301         {
302             "RGBX_NV12 module",
303             PP_RGBX_LOAD_SAVE_NV12,
304             pp_rgbx_load_save_nv12_gen8,
305             sizeof(pp_rgbx_load_save_nv12_gen8),
306             NULL,
307         },
308
309         gen8_pp_plx_avs_initialize,
310     },
311
312     {
313         {
314             "NV12_RGBX module",
315             PP_NV12_LOAD_SAVE_RGBX,
316             pp_nv12_load_save_rgbx_gen8,
317             sizeof(pp_nv12_load_save_rgbx_gen8),
318             NULL,
319         },
320
321         gen8_pp_plx_avs_initialize,
322     },
323 };
324
325 static int
326 pp_get_surface_fourcc(VADriverContextP ctx, const struct i965_surface *surface)
327 {
328     int fourcc;
329
330     if (surface->type == I965_SURFACE_TYPE_IMAGE) {
331         struct object_image *obj_image = (struct object_image *)surface->base;
332         fourcc = obj_image->image.format.fourcc;
333     } else {
334         struct object_surface *obj_surface = (struct object_surface *)surface->base;
335         fourcc = obj_surface->fourcc;
336     }
337
338     return fourcc;
339 }
340
341 static void
342 gen8_pp_set_surface_tiling(struct gen8_surface_state *ss, unsigned int tiling)
343 {
344     switch (tiling) {
345     case I915_TILING_NONE:
346         ss->ss0.tiled_surface = 0;
347         ss->ss0.tile_walk = 0;
348         break;
349     case I915_TILING_X:
350         ss->ss0.tiled_surface = 1;
351         ss->ss0.tile_walk = I965_TILEWALK_XMAJOR;
352         break;
353     case I915_TILING_Y:
354         ss->ss0.tiled_surface = 1;
355         ss->ss0.tile_walk = I965_TILEWALK_YMAJOR;
356         break;
357     }
358 }
359
360 static void
361 gen8_pp_set_surface2_tiling(struct gen8_surface_state2 *ss, unsigned int tiling)
362 {
363     switch (tiling) {
364     case I915_TILING_NONE:
365         ss->ss2.tiled_surface = 0;
366         ss->ss2.tile_walk = 0;
367         break;
368     case I915_TILING_X:
369         ss->ss2.tiled_surface = 1;
370         ss->ss2.tile_walk = I965_TILEWALK_XMAJOR;
371         break;
372     case I915_TILING_Y:
373         ss->ss2.tiled_surface = 1;
374         ss->ss2.tile_walk = I965_TILEWALK_YMAJOR;
375         break;
376     }
377 }
378
379
/*
 * Fill one gen8 SURFACE_STATE entry in the combined surface-state /
 * binding-table buffer and point binding-table slot 'index' at it.
 *
 * surf_bo/surf_bo_offset locate the pixel data; width/height/pitch are
 * the surface dimensions in units of 'format' elements; is_target selects
 * whether the GPU may write through this entry (render-domain write reloc).
 */
static void
gen8_pp_set_surface_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                          dri_bo *surf_bo, unsigned long surf_bo_offset,
                          int width, int height, int pitch, int format,
                          int index, int is_target)
{
    struct gen8_surface_state *ss;
    dri_bo *ss_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss_bo = pp_context->surface_state_binding_table.bo;
    assert(ss_bo);

    dri_bo_map(ss_bo, True);
    assert(ss_bo->virtual);
    ss = (struct gen8_surface_state *)((char *)ss_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = I965_SURFACE_2D;
    ss->ss0.surface_format = format;
    /* Gen8 surface state carries the full base address in ss8. */
    ss->ss8.base_addr = surf_bo->offset + surf_bo_offset;
    /* Hardware expects dimensions minus one. */
    ss->ss2.width = width - 1;
    ss->ss2.height = height - 1;
    ss->ss3.pitch = pitch - 1;

    /* Always set 1 (align-4 mode) per B-spec. */
    ss->ss0.vertical_alignment = 1;
    ss->ss0.horizontal_alignment = 1;

    gen8_pp_set_surface_tiling(ss, tiling);
    gen8_render_set_surface_scs(ss);
    /* Relocate the base address written into ss8; targets get a
     * render-domain write so the kernel tracks GPU writes to them. */
    dri_bo_emit_reloc(ss_bo,
                      I915_GEM_DOMAIN_RENDER, is_target ? I915_GEM_DOMAIN_RENDER : 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state, ss8),
                      surf_bo);
    /* Make binding-table slot 'index' point at this surface-state entry. */
    ((unsigned int *)((char *)ss_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss_bo);
}
420
421
/*
 * Fill one gen8 sampler "surface state 2" entry (used for AVS/sampler
 * reads) and point binding-table slot 'index' at it.
 *
 * xoffset/yoffset are the Cb offsets within the surface; for NV12-style
 * layouts interleave_chroma is non-zero so a single entry covers the
 * interleaved CbCr plane.
 */
static void
gen8_pp_set_surface2_state(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                           dri_bo *surf_bo, unsigned long surf_bo_offset,
                           int width, int height, int wpitch,
                           int xoffset, int yoffset,
                           int format, int interleave_chroma,
                           int index)
{
    struct gen8_surface_state2 *ss2;
    dri_bo *ss2_bo;
    unsigned int tiling;
    unsigned int swizzle;

    dri_bo_get_tiling(surf_bo, &tiling, &swizzle);
    ss2_bo = pp_context->surface_state_binding_table.bo;
    assert(ss2_bo);

    dri_bo_map(ss2_bo, True);
    assert(ss2_bo->virtual);
    ss2 = (struct gen8_surface_state2 *)((char *)ss2_bo->virtual + SURFACE_STATE_OFFSET(index));
    memset(ss2, 0, sizeof(*ss2));
    /* In this layout the base address lives in ss6 (cf. ss8 for the
     * regular surface state). */
    ss2->ss6.base_addr = surf_bo->offset + surf_bo_offset;
    ss2->ss1.cbcr_pixel_offset_v_direction = 0;
    /* Hardware expects dimensions minus one. */
    ss2->ss1.width = width - 1;
    ss2->ss1.height = height - 1;
    ss2->ss2.pitch = wpitch - 1;
    ss2->ss2.interleave_chroma = interleave_chroma;
    ss2->ss2.surface_format = format;
    ss2->ss3.x_offset_for_cb = xoffset;
    ss2->ss3.y_offset_for_cb = yoffset;
    gen8_pp_set_surface2_tiling(ss2, tiling);
    /* Read-only reloc: source surfaces are never written by the kernel. */
    dri_bo_emit_reloc(ss2_bo,
                      I915_GEM_DOMAIN_RENDER, 0,
                      surf_bo_offset,
                      SURFACE_STATE_OFFSET(index) + offsetof(struct gen8_surface_state2, ss6),
                      surf_bo);
    /* Make binding-table slot 'index' point at this surface-state entry. */
    ((unsigned int *)((char *)ss2_bo->virtual + BINDING_TABLE_OFFSET))[index] = SURFACE_STATE_OFFSET(index);
    dri_bo_unmap(ss2_bo);
}
461
/*
 * Describe 'surface' to the media read/write kernels: set up to three
 * surface-state entries (one per plane) starting at binding-table slot
 * 'base_index' and report each plane's geometry through the out arrays
 * width[3]/height[3]/pitch[3]/offset[3].
 *
 * is_target != 0 means the kernel writes this surface: target planes use
 * plain SURFACE_STATE entries with byte-addressed R8/R8G8 formats (hence
 * the width scaling below), while source planes use sampler
 * "surface state 2" entries addressed in pixels.
 */
static void
gen8_pp_set_media_rw_message_surface(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
                                     const struct i965_surface *surface,
                                     int base_index, int is_target,
                                     int *width, int *height, int *pitch, int *offset)
{
    struct object_surface *obj_surface;
    struct object_image *obj_image;
    dri_bo *bo;
    int fourcc = pp_get_surface_fourcc(ctx, surface);
    /* YV12/YV16/IMC1 store V before U, so swap which image plane index
     * is treated as U and which as V. */
    const int U = (fourcc == VA_FOURCC_YV12 ||
                   fourcc == VA_FOURCC_YV16 ||
                   fourcc == VA_FOURCC_IMC1) ? 2 : 1;
    const int V = (fourcc == VA_FOURCC_YV12 ||
                   fourcc == VA_FOURCC_YV16 ||
                   fourcc == VA_FOURCC_IMC1) ? 1 : 2;
    int interleaved_uv = fourcc == VA_FOURCC_NV12;
    int packed_yuv = (fourcc == VA_FOURCC_YUY2 || fourcc == VA_FOURCC_UYVY);
    int rgbx_format = (fourcc == VA_FOURCC_RGBA ||
                              fourcc == VA_FOURCC_RGBX ||
                              fourcc == VA_FOURCC_BGRA ||
                              fourcc == VA_FOURCC_BGRX);

    /* Gather the per-plane geometry from either the surface or the image. */
    if (surface->type == I965_SURFACE_TYPE_SURFACE) {
        obj_surface = (struct object_surface *)surface->base;
        bo = obj_surface->bo;
        width[0] = obj_surface->orig_width;
        height[0] = obj_surface->orig_height;
        pitch[0] = obj_surface->width;
        offset[0] = 0;

        if (packed_yuv) {
            if (is_target)
                width[0] = obj_surface->orig_width * 2; /* surface format is R8, so double the width */
            else
                width[0] = obj_surface->orig_width;     /* surface format is YCBCR, width is specified in units of pixels */

        } else if (rgbx_format) {
            if (is_target)
                width[0] = obj_surface->orig_width * 4; /* surface format is R8, so quadruple the width */
        }

        width[1] = obj_surface->cb_cr_width;
        height[1] = obj_surface->cb_cr_height;
        pitch[1] = obj_surface->cb_cr_pitch;
        offset[1] = obj_surface->y_cb_offset * obj_surface->width;

        width[2] = obj_surface->cb_cr_width;
        height[2] = obj_surface->cb_cr_height;
        pitch[2] = obj_surface->cb_cr_pitch;
        offset[2] = obj_surface->y_cr_offset * obj_surface->width;
    } else {
        obj_image = (struct object_image *)surface->base;
        bo = obj_image->bo;
        width[0] = obj_image->image.width;
        height[0] = obj_image->image.height;
        pitch[0] = obj_image->image.pitches[0];
        offset[0] = obj_image->image.offsets[0];

        if (rgbx_format) {
            if (is_target)
                width[0] = obj_image->image.width * 4; /* surface format is R8, so quadruple the width */
        } else if (packed_yuv) {
            if (is_target)
                width[0] = obj_image->image.width * 2;  /* surface format is R8, so double the width */
            else
                width[0] = obj_image->image.width;      /* surface format is YCBCR, width is specified in units of pixels */
        } else if (interleaved_uv) {
            width[1] = obj_image->image.width / 2;
            height[1] = obj_image->image.height / 2;
            pitch[1] = obj_image->image.pitches[1];
            offset[1] = obj_image->image.offsets[1];
        } else {
            /* Fully planar: separate U and V planes, half-size chroma by
             * default; 4:2:2 vertical layouts keep full chroma height. */
            width[1] = obj_image->image.width / 2;
            height[1] = obj_image->image.height / 2;
            pitch[1] = obj_image->image.pitches[U];
            offset[1] = obj_image->image.offsets[U];
            width[2] = obj_image->image.width / 2;
            height[2] = obj_image->image.height / 2;
            pitch[2] = obj_image->image.pitches[V];
            offset[2] = obj_image->image.offsets[V];
            if (fourcc == VA_FOURCC_YV16 || fourcc == VA_FOURCC_422H) {
                width[1] = obj_image->image.width / 2;
                height[1] = obj_image->image.height;
                width[2] = obj_image->image.width / 2;
                height[2] = obj_image->image.height;
            }
        }
    }

    if (is_target) {
        /* Output plane 0: byte-addressed R8 view, so width is /4 (the
         * kernel writes 4 bytes per element). */
        gen8_pp_set_surface_state(ctx, pp_context,
                                  bo, 0,
                                  width[0] / 4, height[0], pitch[0],
                                  I965_SURFACEFORMAT_R8_UINT,
                                  base_index, 1);
        if (rgbx_format) {
                struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
                /* the format is MSB: X-B-G-R */
                pp_static_parameter->grf2.save_avs_rgb_swap = 0;
                if ((fourcc == VA_FOURCC_BGRA) ||
                        (fourcc == VA_FOURCC_BGRX)) {
                        /* It is stored as MSB: X-R-G-B */
                        pp_static_parameter->grf2.save_avs_rgb_swap = 1;
                }
        }
        /* Planar outputs also need chroma plane(s). */
        if (!packed_yuv && !rgbx_format) {
            if (interleaved_uv) {
                gen8_pp_set_surface_state(ctx, pp_context,
                                          bo, offset[1],
                                          width[1] / 2, height[1], pitch[1],
                                          I965_SURFACEFORMAT_R8G8_SINT,
                                          base_index + 1, 1);
            } else {
                gen8_pp_set_surface_state(ctx, pp_context,
                                          bo, offset[1],
                                          width[1] / 4, height[1], pitch[1],
                                          I965_SURFACEFORMAT_R8_SINT,
                                          base_index + 1, 1);
                gen8_pp_set_surface_state(ctx, pp_context,
                                          bo, offset[2],
                                          width[2] / 4, height[2], pitch[2],
                                          I965_SURFACEFORMAT_R8_SINT,
                                          base_index + 2, 1);
            }
        }
    } else {
        /* Source surfaces go through the sampler; pick the sampler format
         * for plane 0 based on the fourcc. */
        int format0 = SURFACE_FORMAT_Y8_UNORM;

        switch (fourcc) {
        case VA_FOURCC_YUY2:
            format0 = SURFACE_FORMAT_YCRCB_NORMAL;
            break;

        case VA_FOURCC_UYVY:
            format0 = SURFACE_FORMAT_YCRCB_SWAPY;
            break;

        default:
            break;
        }
        if (rgbx_format) {
            struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
            /* Only R8G8B8A8_UNORM is supported for BGRX or RGBX */
            format0 = SURFACE_FORMAT_R8G8B8A8_UNORM;
            pp_static_parameter->grf2.src_avs_rgb_swap = 0;
            if ((fourcc == VA_FOURCC_BGRA) ||
                (fourcc == VA_FOURCC_BGRX)) {
                pp_static_parameter->grf2.src_avs_rgb_swap = 1;
            }
        }
        gen8_pp_set_surface2_state(ctx, pp_context,
                                   bo, offset[0],
                                   width[0], height[0], pitch[0],
                                   0, 0,
                                   format0, 0,
                                   base_index);

        /* Planar sources also need chroma plane(s). */
        if (!packed_yuv && !rgbx_format) {
            if (interleaved_uv) {
                gen8_pp_set_surface2_state(ctx, pp_context,
                                           bo, offset[1],
                                           width[1], height[1], pitch[1],
                                           0, 0,
                                           SURFACE_FORMAT_R8B8_UNORM, 0,
                                           base_index + 1);
            } else {
                gen8_pp_set_surface2_state(ctx, pp_context,
                                           bo, offset[1],
                                           width[1], height[1], pitch[1],
                                           0, 0,
                                           SURFACE_FORMAT_R8_UNORM, 0,
                                           base_index + 1);
                gen8_pp_set_surface2_state(ctx, pp_context,
                                           bo, offset[2],
                                           width[2], height[2], pitch[2],
                                           0, 0,
                                           SURFACE_FORMAT_R8_UNORM, 0,
                                           base_index + 2);
            }
        }
    }
}
645
/* The null stage covers everything in a single horizontal step. */
static int
pp_null_x_steps(void *private_context)
{
    (void)private_context; /* unused */

    return 1;
}
651
/* The null stage covers everything in a single vertical step. */
static int
pp_null_y_steps(void *private_context)
{
    (void)private_context; /* unused */

    return 1;
}
657
/* Per-block setup for the null stage: nothing to program, always succeeds. */
static int
pp_null_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
{
    (void)pp_context;
    (void)x;
    (void)y;

    return 0;
}
663
664 static VAStatus
665 pp_null_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
666                    const struct i965_surface *src_surface,
667                    const VARectangle *src_rect,
668                    struct i965_surface *dst_surface,
669                    const VARectangle *dst_rect,
670                    void *filter_param)
671 {
672     /* private function & data */
673     pp_context->pp_x_steps = pp_null_x_steps;
674     pp_context->pp_y_steps = pp_null_y_steps;
675     pp_context->private_context = NULL;
676     pp_context->pp_set_block_parameter = pp_null_set_block_parameter;
677
678     dst_surface->flags = src_surface->flags;
679
680     return VA_STATUS_SUCCESS;
681 }
682
683 static void calculate_boundary_block_mask(struct i965_post_processing_context *pp_context, const VARectangle *dst_rect)
684 {
685     int i, dst_width_adjust;
686     /* x offset of dest surface must be dword aligned.
687      * so we have to extend dst surface on left edge, and mask out pixels not interested
688      */
689     if (dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT) {
690         pp_context->block_horizontal_mask_left = 0;
691         for (i=dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT; i<GPU_ASM_BLOCK_WIDTH; i++)
692         {
693             pp_context->block_horizontal_mask_left |= 1<<i;
694         }
695     }
696     else {
697         pp_context->block_horizontal_mask_left = 0xffff;
698     }
699
700     dst_width_adjust = dst_rect->width + dst_rect->x%GPU_ASM_X_OFFSET_ALIGNMENT;
701     if (dst_width_adjust%GPU_ASM_BLOCK_WIDTH){
702         pp_context->block_horizontal_mask_right = (1 << (dst_width_adjust%GPU_ASM_BLOCK_WIDTH)) - 1;
703     }
704     else {
705         pp_context->block_horizontal_mask_right = 0xffff;
706     }
707
708     if (dst_rect->height%GPU_ASM_BLOCK_HEIGHT){
709         pp_context->block_vertical_mask_bottom = (1 << (dst_rect->height%GPU_ASM_BLOCK_HEIGHT)) - 1;
710     }
711     else {
712         pp_context->block_vertical_mask_bottom = 0xff;
713     }
714
715 }
716
717 static int
718 gen7_pp_avs_x_steps(void *private_context)
719 {
720     struct pp_avs_context *pp_avs_context = private_context;
721
722     return pp_avs_context->dest_w / 16;
723 }
724
725 static int
726 gen7_pp_avs_y_steps(void *private_context)
727 {
728     struct pp_avs_context *pp_avs_context = private_context;
729
730     return pp_avs_context->dest_h / 16;
731 }
732
733 static int
734 gen7_pp_avs_set_block_parameter(struct i965_post_processing_context *pp_context, int x, int y)
735 {
736     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)pp_context->private_context;
737     struct gen7_pp_inline_parameter *pp_inline_parameter = pp_context->pp_inline_parameter;
738
739     pp_inline_parameter->grf7.destination_block_horizontal_origin = x * 16 + pp_avs_context->dest_x;
740     pp_inline_parameter->grf7.destination_block_vertical_origin = y * 16 + pp_avs_context->dest_y;
741     pp_inline_parameter->grf7.constant_0 = 0xffffffff;
742     pp_inline_parameter->grf7.sampler_load_main_video_x_scaling_step = pp_avs_context->horiz_range / pp_avs_context->src_w;
743
744     return 0;
745 }
746
747 static void gen7_update_src_surface_uv_offset(VADriverContextP    ctx,
748                                               struct i965_post_processing_context *pp_context,
749                                               const struct i965_surface *surface)
750 {
751     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
752     int fourcc = pp_get_surface_fourcc(ctx, surface);
753
754     if (fourcc == VA_FOURCC_YUY2) {
755         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 0;
756         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 1;
757         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 3;
758     } else if (fourcc == VA_FOURCC_UYVY) {
759         pp_static_parameter->grf2.di_destination_packed_y_component_offset = 1;
760         pp_static_parameter->grf2.di_destination_packed_u_component_offset = 0;
761         pp_static_parameter->grf2.di_destination_packed_v_component_offset = 2;
762     }
763 }
764
765 static VAStatus
766 gen8_pp_plx_avs_initialize(VADriverContextP ctx, struct i965_post_processing_context *pp_context,
767                            const struct i965_surface *src_surface,
768                            const VARectangle *src_rect,
769                            struct i965_surface *dst_surface,
770                            const VARectangle *dst_rect,
771                            void *filter_param)
772 {
773 /* TODO: Add the sampler_8x8 state */
774     struct pp_avs_context *pp_avs_context = (struct pp_avs_context *)&pp_context->pp_avs_context;
775     struct gen7_pp_static_parameter *pp_static_parameter = pp_context->pp_static_parameter;
776     struct gen8_sampler_8x8_avs *sampler_8x8;
777     struct i965_sampler_8x8_coefficient *sampler_8x8_state;
778     int i;
779     int width[3], height[3], pitch[3], offset[3];
780     int src_width, src_height;
781     unsigned char *cc_ptr;
782
783     memset(pp_static_parameter, 0, sizeof(struct gen7_pp_static_parameter));
784
785     /* source surface */
786     gen8_pp_set_media_rw_message_surface(ctx, pp_context, src_surface, 0, 0,
787                                          width, height, pitch, offset);
788     src_height = height[0];
789     src_width  = width[0];
790
791     /* destination surface */
792     gen8_pp_set_media_rw_message_surface(ctx, pp_context, dst_surface, 24, 1,
793                                          width, height, pitch, offset);
794
795     /* sampler 8x8 state */
796     dri_bo_map(pp_context->dynamic_state.bo, True);
797     assert(pp_context->dynamic_state.bo->virtual);
798
799     cc_ptr = (unsigned char *) pp_context->dynamic_state.bo->virtual +
800                         pp_context->sampler_offset;
801     /* Currently only one gen8 sampler_8x8 is initialized */
802     sampler_8x8 = (struct gen8_sampler_8x8_avs *) cc_ptr;
803     memset(sampler_8x8, 0, sizeof(*sampler_8x8));
804
805     sampler_8x8->dw0.gain_factor = 44;
806     sampler_8x8->dw0.weak_edge_threshold = 1;
807     sampler_8x8->dw0.strong_edge_threshold = 8;
808     /* Use the value like that on Ivy instead of default
809      * sampler_8x8->dw0.r3x_coefficient = 5;
810      */
811     sampler_8x8->dw0.r3x_coefficient = 27;
812     sampler_8x8->dw0.r3c_coefficient = 5;
813
814     sampler_8x8->dw2.global_noise_estimation = 255;
815     sampler_8x8->dw2.non_edge_weight = 1;
816     sampler_8x8->dw2.regular_weight = 2;
817     sampler_8x8->dw2.strong_edge_weight = 7;
818     /* Use the value like that on Ivy instead of default
819      * sampler_8x8->dw2.r5x_coefficient = 7;
820      * sampler_8x8->dw2.r5cx_coefficient = 7;
821      * sampler_8x8->dw2.r5c_coefficient = 7;
822      */
823     sampler_8x8->dw2.r5x_coefficient = 9;
824     sampler_8x8->dw2.r5cx_coefficient = 8;
825     sampler_8x8->dw2.r5c_coefficient = 3;
826
827     sampler_8x8->dw3.sin_alpha = 101; /* sin_alpha = 0 */
828     sampler_8x8->dw3.cos_alpha = 79; /* cos_alpha = 0 */
829     sampler_8x8->dw3.sat_max = 0x1f;
830     sampler_8x8->dw3.hue_max = 14;
831     /* The 8tap filter will determine whether the adaptive Filter is
832      * applied for all channels(dw153).
833      * If the 8tap filter is disabled, the adaptive filter should be disabled.
834      * Only when 8tap filter is enabled, it can be enabled or not.
835      */
836     sampler_8x8->dw3.enable_8tap_filter = 3;
837     sampler_8x8->dw3.ief4_smooth_enable = 0;
838
839     sampler_8x8->dw4.s3u = 0;
840     sampler_8x8->dw4.diamond_margin = 4;
841     sampler_8x8->dw4.vy_std_enable = 0;
842     sampler_8x8->dw4.umid = 110;
843     sampler_8x8->dw4.vmid = 154;
844
845     sampler_8x8->dw5.diamond_dv = 0;
846     sampler_8x8->dw5.diamond_th = 35;
847     sampler_8x8->dw5.diamond_alpha = 100; /* diamond_alpha = 0 */
848     sampler_8x8->dw5.hs_margin = 3;
849     sampler_8x8->dw5.diamond_du = 2;
850
851     sampler_8x8->dw6.y_point1 = 46;
852     sampler_8x8->dw6.y_point2 = 47;
853     sampler_8x8->dw6.y_point3 = 254;
854     sampler_8x8->dw6.y_point4 = 255;
855
856     sampler_8x8->dw7.inv_margin_vyl = 3300; /* inv_margin_vyl = 0 */
857
858     sampler_8x8->dw8.inv_margin_vyu = 1600; /* inv_margin_vyu = 0 */
859     sampler_8x8->dw8.p0l = 46;
860     sampler_8x8->dw8.p1l = 216;
861
862     sampler_8x8->dw9.p2l = 236;
863     sampler_8x8->dw9.p3l = 236;
864     sampler_8x8->dw9.b0l = 133;
865     sampler_8x8->dw9.b1l = 130;
866
867     sampler_8x8->dw10.b2l = 130;
868     sampler_8x8->dw10.b3l = 130;
869     /* s0l = -5 / 256. s2.8 */
870     sampler_8x8->dw10.s0l = 1029;    /* s0l = 0 */
871     sampler_8x8->dw10.y_slope2 = 31; /* y_slop2 = 0 */
872
873     sampler_8x8->dw11.s1l = 0;
874     sampler_8x8->dw11.s2l = 0;
875
876     sampler_8x8->dw12.s3l = 0;
877     sampler_8x8->dw12.p0u = 46;
878     sampler_8x8->dw12.p1u = 66;
879     sampler_8x8->dw12.y_slope1 = 31; /* y_slope1 = 0 */
880
881     sampler_8x8->dw13.p2u = 130;
882     sampler_8x8->dw13.p3u = 236;
883     sampler_8x8->dw13.b0u = 143;
884     sampler_8x8->dw13.b1u = 163;
885
886     sampler_8x8->dw14.b2u = 200;
887     sampler_8x8->dw14.b3u = 140;
888     sampler_8x8->dw14.s0u = 256;  /* s0u = 0 */
889
890     sampler_8x8->dw15.s1u = 113; /* s1u = 0 */
891     sampler_8x8->dw15.s2u = 1203; /* s2u = 0 */
892
893     sampler_8x8_state = sampler_8x8->coefficients;
894
895     for (i = 0; i < 17; i++) {
896         float coff;
897         coff = i;
898         coff = coff / 16;
899
900         memset(sampler_8x8_state, 0, sizeof(*sampler_8x8_state));
901         /* for Y channel, currently ignore */
902         sampler_8x8_state->dw0.table_0x_filter_c0 = 0x0;
903         sampler_8x8_state->dw0.table_0x_filter_c1 = 0x0;
904         sampler_8x8_state->dw0.table_0x_filter_c2 = 0x0;
905         sampler_8x8_state->dw0.table_0x_filter_c3 =
906                                 intel_format_convert(1 - coff, 1, 6, 0);
907         sampler_8x8_state->dw1.table_0x_filter_c4 =
908                                 intel_format_convert(coff, 1, 6, 0);
909         sampler_8x8_state->dw1.table_0x_filter_c5 = 0x0;
910         sampler_8x8_state->dw1.table_0x_filter_c6 = 0x0;
911         sampler_8x8_state->dw1.table_0x_filter_c7 = 0x0;
912         sampler_8x8_state->dw2.table_0y_filter_c0 = 0x0;
913         sampler_8x8_state->dw2.table_0y_filter_c1 = 0x0;
914         sampler_8x8_state->dw2.table_0y_filter_c2 = 0x0;
915         sampler_8x8_state->dw2.table_0y_filter_c3 =
916                                 intel_format_convert(1 - coff, 1, 6, 0);
917         sampler_8x8_state->dw3.table_0y_filter_c4 =
918                                 intel_format_convert(coff, 1, 6, 0);
919         sampler_8x8_state->dw3.table_0y_filter_c5 = 0x0;
920         sampler_8x8_state->dw3.table_0y_filter_c6 = 0x0;
921         sampler_8x8_state->dw3.table_0y_filter_c7 = 0x0;
922         /* for U/V channel, 0.25 */
923         sampler_8x8_state->dw4.table_1x_filter_c0 = 0x0;
924         sampler_8x8_state->dw4.table_1x_filter_c1 = 0x0;
925         sampler_8x8_state->dw4.table_1x_filter_c2 = 0x0;
926         sampler_8x8_state->dw4.table_1x_filter_c3 =
927                                 intel_format_convert(1 - coff, 1, 6, 0);
928         sampler_8x8_state->dw5.table_1x_filter_c4 =
929                                 intel_format_convert(coff, 1, 6, 0);
930         sampler_8x8_state->dw5.table_1x_filter_c5 = 0x00;
931         sampler_8x8_state->dw5.table_1x_filter_c6 = 0x0;
932         sampler_8x8_state->dw5.table_1x_filter_c7 = 0x0;
933         sampler_8x8_state->dw6.table_1y_filter_c0 = 0x0;
934         sampler_8x8_state->dw6.table_1y_filter_c1 = 0x0;
935         sampler_8x8_state->dw6.table_1y_filter_c2 = 0x0;
936         sampler_8x8_state->dw6.table_1y_filter_c3 =
937                                 intel_format_convert(1 - coff, 1, 6, 0);
938         sampler_8x8_state->dw7.table_1y_filter_c4 =
939                                 intel_format_convert(coff, 1, 6,0);
940         sampler_8x8_state->dw7.table_1y_filter_c5 = 0x0;
941         sampler_8x8_state->dw7.table_1y_filter_c6 = 0x0;
942         sampler_8x8_state->dw7.table_1y_filter_c7 = 0x0;
943         sampler_8x8_state++;
944     }
945
946     sampler_8x8->dw152.default_sharpness_level = 0;
947     sampler_8x8->dw153.adaptive_filter_for_all_channel = 1;
948     sampler_8x8->dw153.bypass_y_adaptive_filtering = 1;
949     sampler_8x8->dw153.bypass_x_adaptive_filtering = 1;
950
951     dri_bo_unmap(pp_context->dynamic_state.bo);
952
953
954     /* private function & data */
955     pp_context->pp_x_steps = gen7_pp_avs_x_steps;
956     pp_context->pp_y_steps = gen7_pp_avs_y_steps;
957     pp_context->private_context = &pp_context->pp_avs_context;
958     pp_context->pp_set_block_parameter = gen7_pp_avs_set_block_parameter;
959
960     pp_avs_context->dest_x = dst_rect->x;
961     pp_avs_context->dest_y = dst_rect->y;
962     pp_avs_context->dest_w = ALIGN(dst_rect->width, 16);
963     pp_avs_context->dest_h = ALIGN(dst_rect->height, 16);
964     pp_avs_context->src_w = src_rect->width;
965     pp_avs_context->src_h = src_rect->height;
966     pp_avs_context->horiz_range = (float)src_rect->width / src_width;
967
968     int dw = (pp_avs_context->src_w - 1) / 16 + 1;
969     dw = MAX(dw, dst_rect->width);
970
971     pp_static_parameter->grf1.pointer_to_inline_parameter = 7;
972     pp_static_parameter->grf2.avs_wa_enable = 0; /* It is not required on GEN8+ */
973     pp_static_parameter->grf2.avs_wa_width = src_width;
974     pp_static_parameter->grf2.avs_wa_one_div_256_width = (float) 1.0 / (256 * src_width);
975     pp_static_parameter->grf2.avs_wa_five_div_256_width = (float) 5.0 / (256 * src_width);
976     pp_static_parameter->grf2.alpha = 255;
977
978     pp_static_parameter->grf3.sampler_load_horizontal_scaling_step_ratio = (float) pp_avs_context->src_w / dw;
979     pp_static_parameter->grf4.sampler_load_vertical_scaling_step = (float) src_rect->height / src_height / dst_rect->height;
980     pp_static_parameter->grf5.sampler_load_vertical_frame_origin = (float) src_rect->y / src_height -
981         (float) pp_avs_context->dest_y * pp_static_parameter->grf4.sampler_load_vertical_scaling_step;
982     pp_static_parameter->grf6.sampler_load_horizontal_frame_origin = (float) src_rect->x / src_width -
983         (float) pp_avs_context->dest_x * pp_avs_context->horiz_range / dw;
984
985     gen7_update_src_surface_uv_offset(ctx, pp_context, dst_surface);
986
987     dst_surface->flags = src_surface->flags;
988
989     return VA_STATUS_SUCCESS;
990 }
991
/*
 * Common setup for a single gen8 post-processing pass: (re)allocates the
 * surface-state/binding-table BO and the dynamic-state BO, lays out the
 * dynamic state sections (CURBE / interface descriptors / sampler state),
 * clears the static and inline parameters, and finally dispatches to the
 * per-module initialize() callback selected by pp_index.
 */
static VAStatus
gen8_pp_initialize(
    VADriverContextP   ctx,
    struct i965_post_processing_context *pp_context,
    const struct i965_surface *src_surface,
    const VARectangle *src_rect,
    struct i965_surface *dst_surface,
    const VARectangle *dst_rect,
    int                pp_index,
    void * filter_param
)
{
    VAStatus va_status;
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    dri_bo *bo;
    int bo_size;
    unsigned int end_offset;
    struct pp_module *pp_module;
    int static_param_size, inline_param_size;

    /* Fresh BO holding the surface states plus the binding table. */
    dri_bo_unreference(pp_context->surface_state_binding_table.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "surface state & binding table",
                      (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_PP_SURFACES,
                      4096);
    assert(bo);
    pp_context->surface_state_binding_table.bo = bo;

    pp_context->idrt.num_interface_descriptors = 0;

    pp_context->sampler_size = 2 * 4096;

    /* Dynamic-state BO holds CURBE + interface descriptors + samplers,
     * with 4KB of slack to cover the 64-byte alignment of each section. */
    bo_size = 4096 + pp_context->curbe_size + pp_context->sampler_size
                + pp_context->idrt_size;

    dri_bo_unreference(pp_context->dynamic_state.bo);
    bo = dri_bo_alloc(i965->intel.bufmgr,
                      "dynamic_state",
                      bo_size,
                      4096);

    assert(bo);
    pp_context->dynamic_state.bo = bo;
    pp_context->dynamic_state.bo_size = bo_size;

    end_offset = 0;
    pp_context->dynamic_state.end_offset = 0;

    /* Constant buffer offset */
    pp_context->curbe_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->curbe_offset + pp_context->curbe_size;

    /* Interface descriptor offset */
    pp_context->idrt_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->idrt_offset + pp_context->idrt_size;

    /* Sampler state offset */
    pp_context->sampler_offset = ALIGN(end_offset, 64);
    end_offset = pp_context->sampler_offset + pp_context->sampler_size;

    /* update the end offset of dynamic_state */
    pp_context->dynamic_state.end_offset = ALIGN(end_offset, 64);

    static_param_size = sizeof(struct gen7_pp_static_parameter);
    inline_param_size = sizeof(struct gen7_pp_inline_parameter);

    /* Zero the parameter blocks; the module initialize() hook fills them. */
    memset(pp_context->pp_static_parameter, 0, static_param_size);
    memset(pp_context->pp_inline_parameter, 0, inline_param_size);

    assert(pp_index >= PP_NULL && pp_index < NUM_PP_MODULES);
    pp_context->current_pp = pp_index;
    pp_module = &pp_context->pp_modules[pp_index];

    if (pp_module->initialize)
        va_status = pp_module->initialize(ctx, pp_context,
                                          src_surface,
                                          src_rect,
                                          dst_surface,
                                          dst_rect,
                                          filter_param);
    else
        va_status = VA_STATUS_ERROR_UNIMPLEMENTED;

    /* NOTE(review): runs even when initialize() failed — presumably
     * harmless since it only depends on dst_rect; confirm. */
    calculate_boundary_block_mask(pp_context, dst_rect);

    return va_status;
}
1079
1080 static void
1081 gen8_pp_interface_descriptor_table(VADriverContextP   ctx,
1082                                    struct i965_post_processing_context *pp_context)
1083 {
1084     struct gen8_interface_descriptor_data *desc;
1085     dri_bo *bo;
1086     int pp_index = pp_context->current_pp;
1087     unsigned char *cc_ptr;
1088
1089     bo = pp_context->dynamic_state.bo;
1090
1091     dri_bo_map(bo, 1);
1092     assert(bo->virtual);
1093     cc_ptr = (unsigned char *)bo->virtual + pp_context->idrt_offset;
1094
1095     desc = (struct gen8_interface_descriptor_data *) cc_ptr +
1096                 pp_context->idrt.num_interface_descriptors;
1097
1098     memset(desc, 0, sizeof(*desc));
1099     desc->desc0.kernel_start_pointer =
1100                 pp_context->pp_modules[pp_index].kernel.kernel_offset >> 6; /* reloc */
1101     desc->desc2.single_program_flow = 1;
1102     desc->desc2.floating_point_mode = FLOATING_POINT_IEEE_754;
1103     desc->desc3.sampler_count = 0;      /* 1 - 4 samplers used */
1104     desc->desc3.sampler_state_pointer = pp_context->sampler_offset >> 5;
1105     desc->desc4.binding_table_entry_count = 0;
1106     desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET >> 5);
1107     desc->desc5.constant_urb_entry_read_offset = 0;
1108
1109     desc->desc5.constant_urb_entry_read_length = 6; /* grf 1-6 */
1110
1111     dri_bo_unmap(bo);
1112     pp_context->idrt.num_interface_descriptors++;
1113 }
1114
1115
1116 static void
1117 gen8_pp_upload_constants(VADriverContextP ctx,
1118                          struct i965_post_processing_context *pp_context)
1119 {
1120     unsigned char *constant_buffer;
1121     int param_size;
1122
1123     assert(sizeof(struct gen7_pp_static_parameter) == 192);
1124
1125     param_size = sizeof(struct gen7_pp_static_parameter);
1126
1127     dri_bo_map(pp_context->dynamic_state.bo, 1);
1128     assert(pp_context->dynamic_state.bo->virtual);
1129     constant_buffer = (unsigned char *) pp_context->dynamic_state.bo->virtual +
1130                         pp_context->curbe_offset;
1131
1132     memcpy(constant_buffer, pp_context->pp_static_parameter, param_size);
1133     dri_bo_unmap(pp_context->dynamic_state.bo);
1134     return;
1135 }
1136
/*
 * Write all indirect GPU state for the current pass into the
 * dynamic-state BO: interface descriptors, then the CURBE constants.
 */
static void
gen8_pp_states_setup(VADriverContextP ctx,
                     struct i965_post_processing_context *pp_context)
{
    gen8_pp_interface_descriptor_table(ctx, pp_context);
    gen8_pp_upload_constants(ctx, pp_context);
}
1144
/* Switch the command streamer to the media pipeline (shared with gen6+). */
static void
gen6_pp_pipeline_select(VADriverContextP ctx,
                        struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 1);
    OUT_BATCH(batch, CMD_PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH(batch);
}
1155
1156 static void
1157 gen8_pp_state_base_address(VADriverContextP ctx,
1158                            struct i965_post_processing_context *pp_context)
1159 {
1160     struct intel_batchbuffer *batch = pp_context->batch;
1161
1162     BEGIN_BATCH(batch, 16);
1163     OUT_BATCH(batch, CMD_STATE_BASE_ADDRESS | (16 - 2));
1164         /* DW1 Generate state address */
1165     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1166         OUT_BATCH(batch, 0);
1167         OUT_BATCH(batch, 0);
1168         /* DW4. Surface state address */
1169     OUT_RELOC(batch, pp_context->surface_state_binding_table.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY); /* Surface state base address */
1170         OUT_BATCH(batch, 0);
1171         /* DW6. Dynamic state address */
1172     OUT_RELOC(batch, pp_context->dynamic_state.bo, I915_GEM_DOMAIN_RENDER | I915_GEM_DOMAIN_SAMPLER,
1173                 0, 0 | BASE_ADDRESS_MODIFY);
1174         OUT_BATCH(batch, 0);
1175
1176         /* DW8. Indirect object address */
1177     OUT_BATCH(batch, 0 | BASE_ADDRESS_MODIFY);
1178         OUT_BATCH(batch, 0);
1179
1180         /* DW10. Instruction base address */
1181     OUT_RELOC(batch, pp_context->instruction_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
1182         OUT_BATCH(batch, 0);
1183
1184     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1185     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1186     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1187     OUT_BATCH(batch, 0xFFFF0000 | BASE_ADDRESS_MODIFY);
1188     ADVANCE_BATCH(batch);
1189 }
1190
/*
 * Emit MEDIA_VFE_STATE with the thread/URB/CURBE configuration captured
 * in pp_context->vfe_gpu_state.
 */
static void
gen8_pp_vfe_state(VADriverContextP ctx,
                  struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 9);
    OUT_BATCH(batch, CMD_MEDIA_VFE_STATE | (9 - 2));
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    /* DW3: maximum thread count (encoded minus one) | number of URB entries */
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.max_num_threads - 1) << 16 |
              pp_context->vfe_gpu_state.num_urb_entries << 8);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch,
              (pp_context->vfe_gpu_state.urb_entry_size) << 16 |
                /* URB Entry Allocation Size, in 256 bits unit */
              (pp_context->vfe_gpu_state.curbe_allocation_size));
                /* CURBE Allocation Size, in 256 bits unit */
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    OUT_BATCH(batch, 0);
    ADVANCE_BATCH(batch);
}
1215
/*
 * Emit MEDIA_STATE_FLUSH followed by MEDIA_INTERFACE_DESCRIPTOR_LOAD,
 * pointing the hardware at the descriptors written at idrt_offset inside
 * the dynamic-state BO.
 */
static void
gen8_interface_descriptor_load(VADriverContextP ctx,
                               struct i965_post_processing_context *pp_context)
{
    struct intel_batchbuffer *batch = pp_context->batch;

    BEGIN_BATCH(batch, 6);

    /* Flush in-flight media state before loading new descriptors. */
    OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
    OUT_BATCH(batch, 0);

    OUT_BATCH(batch, CMD_MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2));
    OUT_BATCH(batch, 0);
    /* Total descriptor data length in bytes */
    OUT_BATCH(batch,
              pp_context->idrt.num_interface_descriptors * sizeof(struct gen8_interface_descriptor_data));
    /* Offset of the descriptors relative to the dynamic state base */
    OUT_BATCH(batch, pp_context->idrt_offset);
    ADVANCE_BATCH(batch);
}
1234
1235 static void
1236 gen8_pp_curbe_load(VADriverContextP ctx,
1237                    struct i965_post_processing_context *pp_context)
1238 {
1239     struct intel_batchbuffer *batch = pp_context->batch;
1240     struct i965_driver_data *i965 = i965_driver_data(ctx);
1241     int param_size = 64;
1242
1243     if (IS_GEN8(i965->intel.device_info))
1244         param_size = sizeof(struct gen7_pp_static_parameter);
1245
1246     BEGIN_BATCH(batch, 4);
1247     OUT_BATCH(batch, CMD_MEDIA_CURBE_LOAD | (4 - 2));
1248     OUT_BATCH(batch, 0);
1249     OUT_BATCH(batch,
1250               param_size);
1251     OUT_BATCH(batch, pp_context->curbe_offset);
1252     ADVANCE_BATCH(batch);
1253 }
1254
1255 static void
1256 gen8_pp_object_walker(VADriverContextP ctx,
1257                       struct i965_post_processing_context *pp_context)
1258 {
1259     struct i965_driver_data *i965 = i965_driver_data(ctx);
1260     struct intel_batchbuffer *batch = pp_context->batch;
1261     int x, x_steps, y, y_steps;
1262     int param_size, command_length_in_dws, extra_cmd_in_dws;
1263     dri_bo *command_buffer;
1264     unsigned int *command_ptr;
1265
1266     param_size = sizeof(struct gen7_pp_inline_parameter);
1267     if (IS_GEN8(i965->intel.device_info))
1268         param_size = sizeof(struct gen7_pp_inline_parameter);
1269
1270     x_steps = pp_context->pp_x_steps(pp_context->private_context);
1271     y_steps = pp_context->pp_y_steps(pp_context->private_context);
1272     command_length_in_dws = 6 + (param_size >> 2);
1273     extra_cmd_in_dws = 2;
1274     command_buffer = dri_bo_alloc(i965->intel.bufmgr,
1275                                   "command objects buffer",
1276                                   (command_length_in_dws + extra_cmd_in_dws) * 4 * x_steps * y_steps + 64,
1277                                   4096);
1278
1279     dri_bo_map(command_buffer, 1);
1280     command_ptr = command_buffer->virtual;
1281
1282     for (y = 0; y < y_steps; y++) {
1283         for (x = 0; x < x_steps; x++) {
1284             if (!pp_context->pp_set_block_parameter(pp_context, x, y)) {
1285
1286                 *command_ptr++ = (CMD_MEDIA_OBJECT | (command_length_in_dws - 2));
1287                 *command_ptr++ = 0;
1288                 *command_ptr++ = 0;
1289                 *command_ptr++ = 0;
1290                 *command_ptr++ = 0;
1291                 *command_ptr++ = 0;
1292                 memcpy(command_ptr, pp_context->pp_inline_parameter, param_size);
1293                 command_ptr += (param_size >> 2);
1294
1295                 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
1296                 *command_ptr++ = 0;
1297             }
1298         }
1299     }
1300
1301     if ((command_length_in_dws + extra_cmd_in_dws) * x_steps * y_steps % 2 == 0)
1302         *command_ptr++ = 0;
1303
1304     *command_ptr++ = MI_BATCH_BUFFER_END;
1305     *command_ptr++ = 0;
1306
1307     dri_bo_unmap(command_buffer);
1308
1309     if (IS_GEN8(i965->intel.device_info)) {
1310         BEGIN_BATCH(batch, 3);
1311         OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1312         OUT_RELOC(batch, command_buffer,
1313                   I915_GEM_DOMAIN_COMMAND, 0, 0);
1314         OUT_BATCH(batch, 0);
1315         ADVANCE_BATCH(batch);
1316     }
1317
1318     dri_bo_unreference(command_buffer);
1319
1320     /* Have to execute the batch buffer here becuase MI_BATCH_BUFFER_END
1321      * will cause control to pass back to ring buffer
1322      */
1323     intel_batchbuffer_end_atomic(batch);
1324     intel_batchbuffer_flush(batch);
1325     intel_batchbuffer_start_atomic(batch, 0x1000);
1326 }
1327
1328 static void
1329 gen8_pp_pipeline_setup(VADriverContextP ctx,
1330                        struct i965_post_processing_context *pp_context)
1331 {
1332     struct intel_batchbuffer *batch = pp_context->batch;
1333
1334     intel_batchbuffer_start_atomic(batch, 0x1000);
1335     intel_batchbuffer_emit_mi_flush(batch);
1336     gen6_pp_pipeline_select(ctx, pp_context);
1337     gen8_pp_state_base_address(ctx, pp_context);
1338     gen8_pp_vfe_state(ctx, pp_context);
1339     gen8_pp_curbe_load(ctx, pp_context);
1340     gen8_interface_descriptor_load(ctx, pp_context);
1341     gen8_pp_vfe_state(ctx, pp_context);
1342     gen8_pp_object_walker(ctx, pp_context);
1343     intel_batchbuffer_end_atomic(batch);
1344 }
1345
1346 static VAStatus
1347 gen8_post_processing(
1348     VADriverContextP   ctx,
1349     struct i965_post_processing_context *pp_context,
1350     const struct i965_surface *src_surface,
1351     const VARectangle *src_rect,
1352     struct i965_surface *dst_surface,
1353     const VARectangle *dst_rect,
1354     int                pp_index,
1355     void * filter_param
1356 )
1357 {
1358     VAStatus va_status;
1359
1360     va_status = gen8_pp_initialize(ctx, pp_context,
1361                                    src_surface,
1362                                    src_rect,
1363                                    dst_surface,
1364                                    dst_rect,
1365                                    pp_index,
1366                                    filter_param);
1367
1368     if (va_status == VA_STATUS_SUCCESS) {
1369         gen8_pp_states_setup(ctx, pp_context);
1370         gen8_pp_pipeline_setup(ctx, pp_context);
1371     }
1372
1373     return va_status;
1374 }
1375
1376 static void
1377 gen8_post_processing_context_finalize(struct i965_post_processing_context *pp_context)
1378 {
1379     dri_bo_unreference(pp_context->surface_state_binding_table.bo);
1380     pp_context->surface_state_binding_table.bo = NULL;
1381
1382     dri_bo_unreference(pp_context->pp_dndi_context.stmm_bo);
1383     pp_context->pp_dndi_context.stmm_bo = NULL;
1384
1385     dri_bo_unreference(pp_context->pp_dn_context.stmm_bo);
1386     pp_context->pp_dn_context.stmm_bo = NULL;
1387
1388     if (pp_context->instruction_state.bo) {
1389         dri_bo_unreference(pp_context->instruction_state.bo);
1390         pp_context->instruction_state.bo = NULL;
1391     }
1392
1393     if (pp_context->indirect_state.bo) {
1394         dri_bo_unreference(pp_context->indirect_state.bo);
1395         pp_context->indirect_state.bo = NULL;
1396     }
1397
1398     if (pp_context->dynamic_state.bo) {
1399         dri_bo_unreference(pp_context->dynamic_state.bo);
1400         pp_context->dynamic_state.bo = NULL;
1401     }
1402
1403     free(pp_context->pp_static_parameter);
1404     free(pp_context->pp_inline_parameter);
1405     pp_context->pp_static_parameter = NULL;
1406     pp_context->pp_inline_parameter = NULL;
1407 }
1408
1409 #define VPP_CURBE_ALLOCATION_SIZE       32
1410
1411 void
1412 gen8_post_processing_context_init(VADriverContextP ctx,
1413                                   void *data,
1414                                   struct intel_batchbuffer *batch)
1415 {
1416     struct i965_driver_data *i965 = i965_driver_data(ctx);
1417     int i, kernel_size;
1418     unsigned int kernel_offset, end_offset;
1419     unsigned char *kernel_ptr;
1420     struct pp_module *pp_module;
1421     struct i965_post_processing_context *pp_context = data;
1422
1423     {
1424         pp_context->vfe_gpu_state.max_num_threads = 60;
1425         pp_context->vfe_gpu_state.num_urb_entries = 59;
1426         pp_context->vfe_gpu_state.gpgpu_mode = 0;
1427         pp_context->vfe_gpu_state.urb_entry_size = 16 - 1;
1428         pp_context->vfe_gpu_state.curbe_allocation_size = VPP_CURBE_ALLOCATION_SIZE;
1429     }
1430
1431     pp_context->intel_post_processing = gen8_post_processing;
1432     pp_context->finalize = gen8_post_processing_context_finalize;
1433
1434     assert(NUM_PP_MODULES == ARRAY_ELEMS(pp_modules_gen8));
1435
1436     if (IS_GEN8(i965->intel.device_info))
1437         memcpy(pp_context->pp_modules, pp_modules_gen8, sizeof(pp_context->pp_modules));
1438     else {
1439         /* should never get here !!! */
1440         assert(0);
1441     }
1442
1443     kernel_size = 4096 ;
1444
1445     for (i = 0; i < NUM_PP_MODULES; i++) {
1446         pp_module = &pp_context->pp_modules[i];
1447
1448         if (pp_module->kernel.bin && pp_module->kernel.size) {
1449             kernel_size += pp_module->kernel.size;
1450         }
1451     }
1452
1453     pp_context->instruction_state.bo = dri_bo_alloc(i965->intel.bufmgr,
1454                                   "kernel shader",
1455                                   kernel_size,
1456                                   0x1000);
1457     if (pp_context->instruction_state.bo == NULL) {
1458         WARN_ONCE("failure to allocate the buffer space for kernel shader in VPP\n");
1459         return;
1460     }
1461
1462     assert(pp_context->instruction_state.bo);
1463
1464
1465     pp_context->instruction_state.bo_size = kernel_size;
1466     pp_context->instruction_state.end_offset = 0;
1467     end_offset = 0;
1468
1469     dri_bo_map(pp_context->instruction_state.bo, 1);
1470     kernel_ptr = (unsigned char *)(pp_context->instruction_state.bo->virtual);
1471
1472     for (i = 0; i < NUM_PP_MODULES; i++) {
1473         pp_module = &pp_context->pp_modules[i];
1474
1475         kernel_offset = ALIGN(end_offset, 64);
1476         pp_module->kernel.kernel_offset = kernel_offset;
1477
1478         if (pp_module->kernel.bin && pp_module->kernel.size) {
1479
1480             memcpy(kernel_ptr + kernel_offset, pp_module->kernel.bin, pp_module->kernel.size);
1481             end_offset = kernel_offset + pp_module->kernel.size;
1482         }
1483     }
1484
1485     pp_context->instruction_state.end_offset = ALIGN(end_offset, 64);
1486
1487     dri_bo_unmap(pp_context->instruction_state.bo);
1488
1489     /* static & inline parameters */
1490     if (IS_GEN8(i965->intel.device_info)) {
1491         pp_context->pp_static_parameter = calloc(sizeof(struct gen7_pp_static_parameter), 1);
1492         pp_context->pp_inline_parameter = calloc(sizeof(struct gen7_pp_inline_parameter), 1);
1493     }
1494
1495     pp_context->pp_dndi_context.current_out_surface = VA_INVALID_SURFACE;
1496     pp_context->pp_dndi_context.current_out_obj_surface = NULL;
1497     pp_context->pp_dndi_context.frame_order = -1;
1498     pp_context->batch = batch;
1499
1500     pp_context->idrt_size = 5 * sizeof(struct gen8_interface_descriptor_data);
1501     pp_context->curbe_size = 256;
1502 }