2 * Copyright © 2010-2011 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Zhou Chang <chang.zhou@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "i965_defines.h"
37 #include "i965_structs.h"
38 #include "i965_drv_video.h"
39 #include "i965_encoder.h"
/* Emit MFX_PIPE_MODE_SELECT for Gen6: a 4-dword command that puts the MFX
 * engine into AVC encode mode with post-deblocking output enabled and
 * stream-out disabled (the inline comments document each bit field).
 * NOTE(review): gaps in the embedded line numbering indicate some lines
 * (braces / OUT_BCS_BATCH headers for dwords 2-3) are missing from this view. */
42 gen6_mfc_pipe_mode_select(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
44 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
46 BEGIN_BCS_BATCH(batch, 4);
48 OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (4 - 2));
50 (0 << 10) | /* disable Stream-Out , advanced QP/bitrate control need enable it*/
51 (1 << 9) | /* Post Deblocking Output */
52 (0 << 8) | /* Pre Deblocking Output */
53 (0 << 7) | /* disable TLB prefectch */
54 (0 << 5) | /* not in stitch mode */
55 (1 << 4) | /* encoding mode */
56 (2 << 0)); /* Standard Select: AVC */
58 (0 << 20) | /* round flag in PB slice */
59 (0 << 19) | /* round flag in Intra8x8 */
60 (0 << 7) | /* expand NOA bus flag */
61 (1 << 6) | /* must be 1 */
62 (0 << 5) | /* disable clock gating for NOA */
63 (0 << 4) | /* terminate if AVC motion and POC table error occurs */
64 (0 << 3) | /* terminate if AVC mbdata error occurs */
65 (0 << 2) | /* terminate if AVC CABAC/CAVLC decode error occurs */
66 (0 << 1) | /* AVC long field motion vector */
67 (0 << 0)); /* always calculate AVC ILDB boundary strength */
68 OUT_BCS_BATCH(batch, 0);
70 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_PIPE_MODE_SELECT for Gen7: 5-dword variant that additionally
 * selects long-format commands and VLD mode, and takes the codec standard
 * as a parameter (AVC or MPEG-2 only, per the assert).
 * NOTE(review): the `standard_select` parameter declaration is missing from
 * this view (line-number gap between 74 and 76) — it is asserted and used below. */
74 gen7_mfc_pipe_mode_select(VADriverContextP ctx,
76 struct gen6_encoder_context *gen6_encoder_context)
78 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
80 assert(standard_select == MFX_FORMAT_MPEG2 ||
81 standard_select == MFX_FORMAT_AVC);
83 BEGIN_BCS_BATCH(batch, 5);
84 OUT_BCS_BATCH(batch, MFX_PIPE_MODE_SELECT | (5 - 2));
86 (MFX_LONG_MODE << 17) | /* Must be long format for encoder */
87 (MFD_MODE_VLD << 15) | /* VLD mode */
88 (0 << 10) | /* disable Stream-Out */
89 (1 << 9) | /* Post Deblocking Output */
90 (0 << 8) | /* Pre Deblocking Output */
91 (0 << 5) | /* not in stitch mode */
92 (1 << 4) | /* encoding mode */
93 (standard_select << 0)); /* standard select: avc or mpeg2 */
95 (0 << 7) | /* expand NOA bus flag */
96 (0 << 6) | /* disable slice-level clock gating */
97 (0 << 5) | /* disable clock gating for NOA */
98 (0 << 4) | /* terminate if AVC motion and POC table error occurs */
99 (0 << 3) | /* terminate if AVC mbdata error occurs */
100 (0 << 2) | /* terminate if AVC CABAC/CAVLC decode error occurs */
103 OUT_BCS_BATCH(batch, 0);
104 OUT_BCS_BATCH(batch, 0);
106 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_SURFACE_STATE for Gen6: describes the reconstructed/source
 * surface as Y-tiled planar NV12 (interleaved U/V), using the width/height
 * and pitches cached in mfc_context->surface_state by gen6_mfc_avc_prepare().
 * Gen6 packs height at bit 19 / width at bit 6 (contrast with Gen7 below). */
110 gen6_mfc_surface_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
112 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
113 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
115 BEGIN_BCS_BATCH(batch, 6);
117 OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
118 OUT_BCS_BATCH(batch, 0);
120 ((mfc_context->surface_state.height - 1) << 19) |
121 ((mfc_context->surface_state.width - 1) << 6));
123 (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
124 (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
125 (0 << 22) | /* surface object control state, FIXME??? */
126 ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
127 (0 << 2) | /* must be 0 for interleave U/V */
128 (1 << 1) | /* must be y-tiled */
129 (I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
131 (0 << 16) | /* must be 0 for interleave U/V */
132 (mfc_context->surface_state.h_pitch)); /* y offset for U(cb) */
133 OUT_BCS_BATCH(batch, 0);
134 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_SURFACE_STATE for Gen7: same NV12 Y-tiled description as the
 * Gen6 variant, but with the Gen7 field layout — height at bit 18 and
 * width at bit 4 in dword 2. */
138 gen7_mfc_surface_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
140 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
141 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
143 BEGIN_BCS_BATCH(batch, 6);
145 OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
146 OUT_BCS_BATCH(batch, 0);
148 ((mfc_context->surface_state.height - 1) << 18) |
149 ((mfc_context->surface_state.width - 1) << 4));
151 (MFX_SURFACE_PLANAR_420_8 << 28) | /* 420 planar YUV surface */
152 (1 << 27) | /* must be 1 for interleave U/V, hardware requirement */
153 (0 << 22) | /* surface object control state, FIXME??? */
154 ((mfc_context->surface_state.w_pitch - 1) << 3) | /* pitch */
155 (0 << 2) | /* must be 0 for interleave U/V */
156 (1 << 1) | /* must be tiled */
157 (I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
159 (0 << 16) | /* must be 0 for interleave U/V */
160 (mfc_context->surface_state.h_pitch)); /* y offset for U(cb) */
161 OUT_BCS_BATCH(batch, 0);
162 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_PIPE_BUF_ADDR_STATE (24 dwords): relocations for every buffer the
 * MFX engine touches — post-deblocking output, uncompressed source, stream-out,
 * intra/deblocking row-store scratch, up to 16 reference pictures (NULL slots
 * written as 0), and the macroblock status buffer.
 * NOTE(review): loop variable `i` declaration and some closing braces are
 * missing from this view (line-numbering gaps). */
168 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
169 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
172 BEGIN_BCS_BATCH(batch, 24);
174 OUT_BCS_BATCH(batch, MFX_PIPE_BUF_ADDR_STATE | (24 - 2));
176 OUT_BCS_BATCH(batch, 0); /* pre output addr */
178 OUT_BCS_RELOC(batch, mfc_context->post_deblocking_output.bo,
179 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
180 0); /* post output addr */
182 OUT_BCS_RELOC(batch, mfc_context->uncompressed_picture_source.bo,
183 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
184 0); /* uncompressed data */
185 OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
186 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
187 0); /* StreamOut data*/
188 OUT_BCS_RELOC(batch, mfc_context->intra_row_store_scratch_buffer.bo,
189 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
191 OUT_BCS_RELOC(batch, mfc_context->deblocking_filter_row_store_scratch_buffer.bo,
192 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
194 /* 7..22 Reference pictures*/
195 for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
196 if ( mfc_context->reference_surfaces[i].bo != NULL) {
197 OUT_BCS_RELOC(batch, mfc_context->reference_surfaces[i].bo,
198 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
201 OUT_BCS_BATCH(batch, 0);
204 OUT_BCS_RELOC(batch, mfc_context->macroblock_status_buffer.bo,
205 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
206 0); /* Macroblock status buffer*/
208 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_IND_OBJ_BASE_ADDR_STATE (Gen6, 11 dwords): points the indirect
 * MV object at the VME output buffer; all other indirect object base/bound
 * addresses (including the PAK-BSE object) are left at 0. */
212 gen6_mfc_ind_obj_base_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
214 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
215 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
217 BEGIN_BCS_BATCH(batch, 11);
219 OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
220 OUT_BCS_BATCH(batch, 0);
221 OUT_BCS_BATCH(batch, 0);
222 /* MFX Indirect MV Object Base Address */
223 OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
224 OUT_BCS_BATCH(batch, 0);
225 OUT_BCS_BATCH(batch, 0);
226 OUT_BCS_BATCH(batch, 0);
227 OUT_BCS_BATCH(batch, 0);
228 OUT_BCS_BATCH(batch, 0);
229 /*MFC Indirect PAK-BSE Object Base Address for Encoder*/
230 OUT_BCS_BATCH(batch, 0);
231 OUT_BCS_BATCH(batch, 0);
233 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_IND_OBJ_BASE_ADDR_STATE (Gen7, 11 dwords): like the Gen6 variant
 * but the MV-object and PAK-BSE upper-bound dwords must be programmed
 * (0x80000000 — per the inline comments, covers up to 2G). */
237 gen7_mfc_ind_obj_base_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
239 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
240 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
242 BEGIN_BCS_BATCH(batch, 11);
244 OUT_BCS_BATCH(batch, MFX_IND_OBJ_BASE_ADDR_STATE | (11 - 2));
245 OUT_BCS_BATCH(batch, 0);
246 OUT_BCS_BATCH(batch, 0);
247 /* MFX Indirect MV Object Base Address */
248 OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
249 OUT_BCS_BATCH(batch, 0x80000000); /* must set, up to 2G */
250 OUT_BCS_BATCH(batch, 0);
251 OUT_BCS_BATCH(batch, 0);
252 OUT_BCS_BATCH(batch, 0);
253 OUT_BCS_BATCH(batch, 0);
254 /*MFC Indirect PAK-BSE Object Base Address for Encoder*/
255 OUT_BCS_BATCH(batch, 0);
256 OUT_BCS_BATCH(batch, 0x80000000); /* must set, up to 2G */
258 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_BSP_BUF_BASE_ADDR_STATE (4 dwords): relocates the BSD/MPC
 * row-store scratch buffer; the remaining two address dwords are 0. */
262 gen6_mfc_bsp_buf_base_addr_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
264 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
265 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
267 BEGIN_BCS_BATCH(batch, 4);
269 OUT_BCS_BATCH(batch, MFX_BSP_BUF_BASE_ADDR_STATE | (4 - 2));
270 OUT_BCS_RELOC(batch, mfc_context->bsd_mpc_row_store_scratch_buffer.bo,
271 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
273 OUT_BCS_BATCH(batch, 0);
274 OUT_BCS_BATCH(batch, 0);
276 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_AVC_IMG_STATE for Gen6 (13 dwords): per-picture AVC encode
 * parameters — MB counts, CABAC mode, chroma 4:2:0, frame-MB-only — plus the
 * MB-level rate-control masks and the fixed QP-delta / frame-size-limit
 * tables used for bitrate control (0x01020304 / 0xFEFDFCFB etc.). */
280 gen6_mfc_avc_img_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
282 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
283 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
285 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
286 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
288 BEGIN_BCS_BATCH(batch, 13);
289 OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (13 - 2));
291 ((width_in_mbs * height_in_mbs) & 0xFFFF));
293 (height_in_mbs << 16) |
294 (width_in_mbs << 0));
296 (0 << 24) | /*Second Chroma QP Offset*/
297 (0 << 16) | /*Chroma QP Offset*/
298 (0 << 14) | /*Max-bit conformance Intra flag*/
299 (0 << 13) | /*Max Macroblock size conformance Inter flag*/
300 (1 << 12) | /*Should always be written as "1" */
301 (0 << 10) | /*QM Preset FLag */
302 (0 << 8) | /*Image Structure*/
303 (0 << 0) ); /*Current Decoed Image Frame Store ID, reserved in Encode mode*/
305 (400 << 16) | /*Mininum Frame size*/
306 (0 << 15) | /*Disable reading of Macroblock Status Buffer*/
307 (0 << 14) | /*Load BitStream Pointer only once, 1 slic 1 frame*/
308 (0 << 13) | /*CABAC 0 word insertion test enable*/
309 (1 << 12) | /*MVUnpackedEnable,compliant to DXVA*/
310 (1 << 10) | /*Chroma Format IDC, 4:2:0*/
311 (1 << 7) | /*0:CAVLC encoding mode,1:CABAC*/
312 (0 << 6) | /*Only valid for VLD decoding mode*/
313 (0 << 5) | /*Constrained Intra Predition Flag, from PPS*/
314 (0 << 4) | /*Direct 8x8 inference flag*/
315 (0 << 3) | /*Only 8x8 IDCT Transform Mode Flag*/
316 (1 << 2) | /*Frame MB only flag*/
317 (0 << 1) | /*MBAFF mode is in active*/
318 (0 << 0) ); /*Field picture flag*/
320 (1<<16) | /*Frame Size Rate Control Flag*/
322 (1<<9) | /*MB level Rate Control Enabling Flag*/
323 (1 << 3) | /*FrameBitRateMinReportMask*/
324 (1 << 2) | /*FrameBitRateMaxReportMask*/
325 (1 << 1) | /*InterMBMaxSizeReportMask*/
326 (1 << 0) ); /*IntraMBMaxSizeReportMask*/
327 OUT_BCS_BATCH(batch, /*Inter and Intra Conformance Max size limit*/
328 (0x0600 << 16) | /*InterMbMaxSz 192 Byte*/
329 (0x0800) ); /*IntraMbMaxSz 256 Byte*/
330 OUT_BCS_BATCH(batch, 0x00000000); /*Reserved : MBZReserved*/
331 OUT_BCS_BATCH(batch, 0x01020304); /*Slice QP Delta for bitrate control*/
332 OUT_BCS_BATCH(batch, 0xFEFDFCFB);
333 OUT_BCS_BATCH(batch, 0x80601004); /*MAX = 128KB, MIN = 64KB*/
334 OUT_BCS_BATCH(batch, 0x00800001);
335 OUT_BCS_BATCH(batch, 0);
337 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_AVC_IMG_STATE for Gen7 (16 dwords). Differs from Gen6: MB width/
 * height are programmed minus one, MB-level rate control dword is zeroed,
 * and the conformance limits / trailing dwords use Gen7-specific values
 * (0xBB8/0xEE8, 0x8C000000, 0x00010000). */
341 gen7_mfc_avc_img_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
343 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
344 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
346 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
347 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
349 BEGIN_BCS_BATCH(batch, 16);
350 OUT_BCS_BATCH(batch, MFX_AVC_IMG_STATE | (16 - 2));
352 ((width_in_mbs * height_in_mbs) & 0xFFFF));
354 ((height_in_mbs - 1) << 16) |
355 ((width_in_mbs - 1) << 0));
357 (0 << 24) | /* Second Chroma QP Offset */
358 (0 << 16) | /* Chroma QP Offset */
359 (0 << 14) | /* Max-bit conformance Intra flag */
360 (0 << 13) | /* Max Macroblock size conformance Inter flag */
361 (0 << 12) | /* FIXME: Weighted_Pred_Flag */
362 (0 << 10) | /* FIXME: Weighted_BiPred_Idc */
363 (0 << 8) | /* FIXME: Image Structure */
364 (0 << 0) ); /* Current Decoed Image Frame Store ID, reserved in Encode mode */
366 (0 << 16) | /* Mininum Frame size */
367 (0 << 15) | /* Disable reading of Macroblock Status Buffer */
368 (0 << 14) | /* Load BitStream Pointer only once, 1 slic 1 frame */
369 (0 << 13) | /* CABAC 0 word insertion test enable */
370 (1 << 12) | /* MVUnpackedEnable,compliant to DXVA */
371 (1 << 10) | /* Chroma Format IDC, 4:2:0 */
372 (0 << 9) | /* FIXME: MbMvFormatFlag */
373 (1 << 7) | /* 0:CAVLC encoding mode,1:CABAC */
374 (0 << 6) | /* Only valid for VLD decoding mode */
375 (0 << 5) | /* Constrained Intra Predition Flag, from PPS */
376 (0 << 4) | /* Direct 8x8 inference flag */
377 (0 << 3) | /* Only 8x8 IDCT Transform Mode Flag */
378 (1 << 2) | /* Frame MB only flag */
379 (0 << 1) | /* MBAFF mode is in active */
380 (0 << 0)); /* Field picture flag */
381 OUT_BCS_BATCH(batch, 0); /* Mainly about MB rate control and debug, just ignoring */
382 OUT_BCS_BATCH(batch, /* Inter and Intra Conformance Max size limit */
383 (0xBB8 << 16) | /* InterMbMaxSz */
384 (0xEE8) ); /* IntraMbMaxSz */
385 OUT_BCS_BATCH(batch, 0); /* Reserved */
386 OUT_BCS_BATCH(batch, 0); /* Slice QP Delta for bitrate control */
387 OUT_BCS_BATCH(batch, 0); /* Slice QP Delta for bitrate control */
388 OUT_BCS_BATCH(batch, 0x8C000000);
389 OUT_BCS_BATCH(batch, 0x00010000);
390 OUT_BCS_BATCH(batch, 0);
391 OUT_BCS_BATCH(batch, 0);
392 OUT_BCS_BATCH(batch, 0);
393 OUT_BCS_BATCH(batch, 0);
395 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_AVC_DIRECTMODE_STATE (69 dwords): reference direct-MV buffer
 * slots are all zeroed (TODO in-tree), only direct_mv_buffers[0] is
 * relocated as the current-picture DMV buffer; POC list dwords are zeroed.
 * NOTE(review): mfc_context and `i` declarations are missing from this view. */
398 static void gen6_mfc_avc_directmode_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
400 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
403 BEGIN_BCS_BATCH(batch, 69);
405 OUT_BCS_BATCH(batch, MFX_AVC_DIRECTMODE_STATE | (69 - 2));
406 //TODO: reference DMV
407 for(i = 0; i < 16; i++){
408 OUT_BCS_BATCH(batch, 0);
409 OUT_BCS_BATCH(batch, 0);
412 //TODO: current DMV just for test
414 OUT_BCS_RELOC(batch, mfc_context->direct_mv_buffers[0].bo,
415 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
418 //drm_intel_bo_pin(mfc_context->direct_mv_buffers[0].bo, 0x1000);
419 //OUT_BCS_BATCH(batch, mfc_context->direct_mv_buffers[0].bo->offset);
420 OUT_BCS_BATCH(batch, 0);
424 OUT_BCS_BATCH(batch, 0);
427 for(i = 0; i < 34; i++) {
428 OUT_BCS_BATCH(batch, 0);
431 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_AVC_SLICE_STATE (11 dwords) for a single slice covering the whole
 * frame: slice type, reference count (0 for I, 1 for P), slice QP, CBR rate
 * control flags (panic + counter reset when rate_control_enable is set), and
 * a relocation to the PAK-BSE coded-buffer object at its data offset.
 * NOTE(review): `slice_type` and `qp` parameter declarations are missing from
 * this view (line-number gap); both are used below. */
434 static void gen6_mfc_avc_slice_state(VADriverContextP ctx,
436 struct gen6_encoder_context *gen6_encoder_context,
437 int rate_control_enable,
440 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
441 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
443 BEGIN_BCS_BATCH(batch, 11);;
445 OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2) );
447 OUT_BCS_BATCH(batch, slice_type); /*Slice Type: I:P:B Slice*/
449 if ( slice_type == SLICE_TYPE_I )
450 OUT_BCS_BATCH(batch, 0); /*no reference frames and pred_weight_table*/
452 OUT_BCS_BATCH(batch, 0x00010000); /*1 reference frame*/
454 OUT_BCS_BATCH(batch, (0<<24) | /*Enable deblocking operation*/
455 (qp<<16) | /*Slice Quantization Parameter*/
457 OUT_BCS_BATCH(batch, 0); /*First MB X&Y , the postion of current slice*/
458 OUT_BCS_BATCH(batch, ( ((mfc_context->surface_state.height+15)/16) << 16) );
461 (rate_control_enable<<31) | /*in CBR mode RateControlCounterEnable = enable*/
462 (1<<30) | /*ResetRateControlCounter*/
463 (0<<28) | /*RC Triggle Mode = Always Rate Control*/
464 (8<<24) | /*RC Stable Tolerance, middle level*/
465 (rate_control_enable<<23) | /*RC Panic Enable*/
466 (0<<22) | /*QP mode, don't modfiy CBP*/
467 (1<<19) | /*IsLastSlice*/
468 (0<<18) | /*BitstreamOutputFlag Compressed BitStream Output Disable Flag 0:enable 1:disable*/
469 (0<<17) | /*HeaderPresentFlag*/
470 (1<<16) | /*SliceData PresentFlag*/
471 (0<<15) | /*TailPresentFlag*/
472 (1<<13) | /*RBSP NAL TYPE*/
473 (0<<12) ); /*CabacZeroWordInsertionEnable*/
475 OUT_BCS_RELOC(batch, mfc_context->mfc_indirect_pak_bse_object.bo,
476 I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
477 mfc_context->mfc_indirect_pak_bse_object.offset);
479 OUT_BCS_BATCH(batch, (24<<24) | /*Target QP - 24 is lowest QP*/
480 (20<<16) | /*Target QP + 20 is highest QP*/
485 OUT_BCS_BATCH(batch, 0x08888888);
486 OUT_BCS_BATCH(batch, 0);
488 ADVANCE_BCS_BATCH(batch);
/* Emit MFX_AVC_QM_STATE (58 dwords, Gen6): flat quantization matrices — all
 * 56 payload dwords set to 0x10101010 (every QM entry = 16, i.e. default
 * flat scaling lists); 0xFF enables all matrices. */
490 static void gen6_mfc_avc_qm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
492 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
495 BEGIN_BCS_BATCH(batch, 58);
497 OUT_BCS_BATCH(batch, MFX_AVC_QM_STATE | 56);
498 OUT_BCS_BATCH(batch, 0xFF ) ;
499 for( i = 0; i < 56; i++) {
500 OUT_BCS_BATCH(batch, 0x10101010);
503 ADVANCE_BCS_BATCH(batch);
/* Emit MFC_AVC_FQM_STATE (113 dwords, Gen6): forward quantization matrices,
 * all 112 payload dwords 0x10001000 (flat FQM entries of 0x1000). */
506 static void gen6_mfc_avc_fqm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
508 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
511 BEGIN_BCS_BATCH(batch, 113);
512 OUT_BCS_BATCH(batch, MFC_AVC_FQM_STATE | (113 - 2));
514 for(i = 0; i < 112;i++) {
515 OUT_BCS_BATCH(batch, 0x10001000);
518 ADVANCE_BCS_BATCH(batch);
/* Emit one MFX_QM_STATE command (Gen7, 18 dwords) for the given matrix type.
 * Copies qm_length dwords from the caller's table into a local 16-dword
 * buffer and emits all 16 dwords (tail beyond qm_length is whatever remains
 * uninitialized in qm_buffer — padding only matters up to qm_length).
 * NOTE(review): the `qm_type`, `qm`, `qm_length` parameter declarations are
 * missing from this view (line-number gap 522..526). */
522 gen7_mfc_qm_state(VADriverContextP ctx,
526 struct gen6_encoder_context *gen6_encoder_context)
528 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
529 unsigned int qm_buffer[16];
531 assert(qm_length <= 16);
532 assert(sizeof(*qm) == 4);
533 memcpy(qm_buffer, qm, qm_length * 4);
535 BEGIN_BCS_BATCH(batch, 18);
536 OUT_BCS_BATCH(batch, MFX_QM_STATE | (18 - 2));
537 OUT_BCS_BATCH(batch, qm_type << 0);
538 intel_batchbuffer_data(batch, qm_buffer, 16 * 4);
539 ADVANCE_BCS_BATCH(batch);
/* Program all four Gen7 AVC quantization matrices (4x4/8x8, intra/inter)
 * with a flat table of 16s via gen7_mfc_qm_state(). */
542 static void gen7_mfc_avc_qm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
544 unsigned int qm[16] = {
545 0x10101010, 0x10101010, 0x10101010, 0x10101010,
546 0x10101010, 0x10101010, 0x10101010, 0x10101010,
547 0x10101010, 0x10101010, 0x10101010, 0x10101010,
548 0x10101010, 0x10101010, 0x10101010, 0x10101010
551 gen7_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 12, gen6_encoder_context);
552 gen7_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 12, gen6_encoder_context);
553 gen7_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 16, gen6_encoder_context);
554 gen7_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 16, gen6_encoder_context);
/* Emit one MFX_FQM_STATE command (Gen7, 34 dwords) for the given matrix
 * type — the 32-dword forward-QM analogue of gen7_mfc_qm_state().
 * NOTE(review): `fqm_type`, `fqm`, `fqm_length` parameter declarations are
 * missing from this view (line-number gap 558..562). */
558 gen7_mfc_fqm_state(VADriverContextP ctx,
562 struct gen6_encoder_context *gen6_encoder_context)
564 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
565 unsigned int fqm_buffer[32];
567 assert(fqm_length <= 32);
568 assert(sizeof(*fqm) == 4);
569 memcpy(fqm_buffer, fqm, fqm_length * 4);
571 BEGIN_BCS_BATCH(batch, 34);
572 OUT_BCS_BATCH(batch, MFX_FQM_STATE | (34 - 2));
573 OUT_BCS_BATCH(batch, fqm_type << 0);
574 intel_batchbuffer_data(batch, fqm_buffer, 32 * 4);
575 ADVANCE_BCS_BATCH(batch);
/* Program all four Gen7 AVC forward quantization matrices with a flat
 * 0x1000-per-entry table via gen7_mfc_fqm_state(). */
578 static void gen7_mfc_avc_fqm_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
580 unsigned int qm[32] = {
581 0x10001000, 0x10001000, 0x10001000, 0x10001000,
582 0x10001000, 0x10001000, 0x10001000, 0x10001000,
583 0x10001000, 0x10001000, 0x10001000, 0x10001000,
584 0x10001000, 0x10001000, 0x10001000, 0x10001000,
585 0x10001000, 0x10001000, 0x10001000, 0x10001000,
586 0x10001000, 0x10001000, 0x10001000, 0x10001000,
587 0x10001000, 0x10001000, 0x10001000, 0x10001000,
588 0x10001000, 0x10001000, 0x10001000, 0x10001000
591 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 24, gen6_encoder_context);
592 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 24, gen6_encoder_context);
593 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 32, gen6_encoder_context);
594 gen7_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 32, gen6_encoder_context);
/* Emit two MFX_AVC_REF_IDX_STATE commands (10 dwords each): one for list L0
 * and one for L1, each describing a single active reference (first entry)
 * with the remaining slots marked invalid (0x80 bytes). */
597 static void gen6_mfc_avc_ref_idx_state(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
599 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
602 BEGIN_BCS_BATCH(batch, 10);
603 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
604 OUT_BCS_BATCH(batch, 0); //Select L0
605 OUT_BCS_BATCH(batch, 0x80808020); //Only 1 reference
606 for(i = 0; i < 7; i++) {
607 OUT_BCS_BATCH(batch, 0x80808080);
609 ADVANCE_BCS_BATCH(batch);
611 BEGIN_BCS_BATCH(batch, 10);
612 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
613 OUT_BCS_BATCH(batch, 1); //Select L1
614 OUT_BCS_BATCH(batch, 0x80808022); //Only 1 reference
615 for(i = 0; i < 7; i++) {
616 OUT_BCS_BATCH(batch, 0x80808080);
618 ADVANCE_BCS_BATCH(batch);
/* Emit MFC_AVC_INSERT_OBJECT (4 dwords): inserts 32 bits of raw payload
 * (0xABCD1234 — placeholder data) into the output bitstream.
 * NOTE(review): the dword-1 flag bits following (32<<8) are missing from
 * this view; `flush_data` is accepted but its use is not visible here. */
622 gen6_mfc_avc_insert_object(VADriverContextP ctx, int flush_data, struct gen6_encoder_context *gen6_encoder_context)
624 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
626 BEGIN_BCS_BATCH(batch, 4);
628 OUT_BCS_BATCH(batch, MFC_AVC_INSERT_OBJECT | (4 -2 ) );
629 OUT_BCS_BATCH(batch, (32<<8) |
634 OUT_BCS_BATCH(batch, 0x00000003);
635 OUT_BCS_BATCH(batch, 0xABCD1234);
637 ADVANCE_BCS_BATCH(batch);
/* Emit one MFC_AVC_PAK_OBJECT (11 dwords) for an intra macroblock at MB
 * coordinates (x, y). Intra16x16 prediction data is taken from the VME
 * output message (msg[1..3]); the per-MB target size is the bit budget / 16
 * (in words) and the max size is clamped at 2x target (cap 255 words).
 * Returns the command length in dwords so the caller can track batch space. */
641 gen6_mfc_avc_pak_object_intra(VADriverContextP ctx, int x, int y, int end_mb, int qp,unsigned int *msg,
642 struct gen6_encoder_context *gen6_encoder_context,
643 int intra_mb_size_in_bits)
645 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
646 int len_in_dwords = 11;
647 unsigned char target_mb_size = intra_mb_size_in_bits / 16; //In Words
648 unsigned char max_mb_size = target_mb_size * 2 > 255? 255: target_mb_size * 2 ;
650 BEGIN_BCS_BATCH(batch, len_in_dwords);
652 OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
653 OUT_BCS_BATCH(batch, 0);
654 OUT_BCS_BATCH(batch, 0);
656 (0 << 24) | /* PackedMvNum, Debug*/
657 (0 << 20) | /* No motion vector */
658 (1 << 19) | /* CbpDcY */
659 (1 << 18) | /* CbpDcU */
660 (1 << 17) | /* CbpDcV */
663 OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x); /* Code Block Pattern for Y*/
664 OUT_BCS_BATCH(batch, 0x000F000F); /* Code Block Pattern */
665 OUT_BCS_BATCH(batch, (0 << 27) | (end_mb << 26) | qp); /* Last MB */
667 /*Stuff for Intra MB*/
668 OUT_BCS_BATCH(batch, msg[1]); /* We using Intra16x16 no 4x4 predmode*/
669 OUT_BCS_BATCH(batch, msg[2]);
670 OUT_BCS_BATCH(batch, msg[3]&0xFC);
672 /*MaxSizeInWord and TargetSzieInWord*/
673 OUT_BCS_BATCH(batch, (max_mb_size << 24) |
674 (target_mb_size << 16) );
676 ADVANCE_BCS_BATCH(batch);
678 return len_in_dwords;
/* Emit one MFC_AVC_PAK_OBJECT (11 dwords) for an inter macroblock
 * (P_L0_16x16). Motion vectors are fetched indirectly from the VME output
 * at `offset` (32 MVs). Max MB size is clamped at 16x target (cap 255
 * words). Returns the command length in dwords. */
681 static int gen6_mfc_avc_pak_object_inter(VADriverContextP ctx, int x, int y, int end_mb, int qp, unsigned int offset,
682 struct gen6_encoder_context *gen6_encoder_context,
683 int inter_mb_size_in_bits)
685 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
686 int len_in_dwords = 11;
687 unsigned char target_mb_size = inter_mb_size_in_bits / 16; //In Words
688 unsigned char max_mb_size = target_mb_size * 16 > 255? 255: target_mb_size * 16 ;
690 BEGIN_BCS_BATCH(batch, len_in_dwords);
692 OUT_BCS_BATCH(batch, MFC_AVC_PAK_OBJECT | (len_in_dwords - 2));
694 OUT_BCS_BATCH(batch, 32); /* 32 MV*/
695 OUT_BCS_BATCH(batch, offset);
698 (1 << 24) | /* PackedMvNum, Debug*/
699 (4 << 20) | /* 8 MV, SNB don't use it*/
700 (1 << 19) | /* CbpDcY */
701 (1 << 18) | /* CbpDcU */
702 (1 << 17) | /* CbpDcV */
703 (0 << 15) | /* Transform8x8Flag = 0*/
704 (0 << 14) | /* Frame based*/
705 (0 << 13) | /* Inter MB */
706 (1 << 8) | /* MbType = P_L0_16x16 */
707 (0 << 7) | /* MBZ for frame */
709 (2 << 4) | /* MBZ for inter*/
711 (0 << 2) | /* SkipMbFlag */
712 (0 << 0)); /* InterMbMode */
714 OUT_BCS_BATCH(batch, (0xFFFF<<16) | (y << 8) | x); /* Code Block Pattern for Y*/
715 OUT_BCS_BATCH(batch, 0x000F000F); /* Code Block Pattern */
716 OUT_BCS_BATCH(batch, (0 << 27) | (end_mb << 26) | qp); /* Last MB */
718 /*Stuff for Inter MB*/
719 OUT_BCS_BATCH(batch, 0x0);
720 OUT_BCS_BATCH(batch, 0x0);
721 OUT_BCS_BATCH(batch, 0x0);
723 /*MaxSizeInWord and TargetSzieInWord*/
724 OUT_BCS_BATCH(batch, (max_mb_size << 24) |
725 (target_mb_size << 16) );
727 ADVANCE_BCS_BATCH(batch);
729 return len_in_dwords;
/* Per-frame MFC (re)initialization: drops all previously referenced buffer
 * objects (outputs, source, coded buffer, DMV buffers, reference surfaces)
 * and reallocates the four fixed-size scratch buffers (intra row store,
 * MB status, deblocking row store 48K, BSD/MPC row store 12K).
 * NOTE(review): `bo`/`i` declarations and the dri_bo_alloc size/name
 * arguments for two allocations are missing from this view. */
732 static void gen6_mfc_init(VADriverContextP ctx, struct gen6_encoder_context *gen6_encoder_context)
734 struct i965_driver_data *i965 = i965_driver_data(ctx);
735 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
739 /*Encode common setup for MFC*/
740 dri_bo_unreference(mfc_context->post_deblocking_output.bo);
741 mfc_context->post_deblocking_output.bo = NULL;
743 dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
744 mfc_context->pre_deblocking_output.bo = NULL;
746 dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
747 mfc_context->uncompressed_picture_source.bo = NULL;
749 dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
750 mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
752 for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
753 dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
754 mfc_context->direct_mv_buffers[i].bo = NULL;
757 for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
758 if (mfc_context->reference_surfaces[i].bo != NULL)
759 dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
760 mfc_context->reference_surfaces[i].bo = NULL;
763 dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
764 bo = dri_bo_alloc(i965->intel.bufmgr,
769 mfc_context->intra_row_store_scratch_buffer.bo = bo;
771 dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
772 bo = dri_bo_alloc(i965->intel.bufmgr,
777 mfc_context->macroblock_status_buffer.bo = bo;
779 dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
780 bo = dri_bo_alloc(i965->intel.bufmgr,
782 49152, /* 6 * 128 * 64 */
785 mfc_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
787 dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
788 bo = dri_bo_alloc(i965->intel.bufmgr,
790 12288, /* 1.5 * 128 * 64 */
793 mfc_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
/* Build the whole-frame PAK command stream: derives per-MB bit budgets from
 * the sequence-level bitrate and frame rate, then walks every macroblock
 * emitting state (re-emitted after each batch flush via emit_new_state) and
 * one PAK object per MB, reading intra modes / MV offsets from the mapped
 * VME output buffer.
 * BUG FIX: the inter-MB budget clamp previously assigned to intra_mb_size
 * (copy-paste error), leaving inter_mb_size unclamped (could exceed the
 * 255-word cap path in gen6_mfc_avc_pak_object_inter) and corrupting the
 * already-clamped intra budget. Now clamps inter_mb_size. */
796 void gen6_mfc_avc_pipeline_programing(VADriverContextP ctx,
797 struct encode_state *encode_state,
798 struct gen6_encoder_context *gen6_encoder_context)
800 struct i965_driver_data *i965 = i965_driver_data(ctx);
801 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
802 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
803 struct gen6_vme_context *vme_context = &gen6_encoder_context->vme_context;
804 VAEncSequenceParameterBufferH264Ext *pSequenceParameter = (VAEncSequenceParameterBufferH264Ext *)encode_state->seq_param_ext->buffer;
805 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
806 VAEncSliceParameterBufferH264Ext *pSliceParameter = (VAEncSliceParameterBufferH264Ext *)encode_state->slice_params_ext[0]->buffer; /* FIXME: multi slices */
808 unsigned int *msg = NULL, offset = 0;
809 int emit_new_state = 1, object_len_in_bytes;
810 int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
811 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
812 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
814 int rate_control_mode = pSequenceParameter->rate_control_method;
815 float fps = pSequenceParameter->time_scale * 0.5 / pSequenceParameter->num_units_in_tick ;
817 int inter_mb_size = pSequenceParameter->bits_per_second * 1.0 / fps / width_in_mbs / height_in_mbs;
818 int intra_mb_size = inter_mb_size * 5.0;
819 int qp = pPicParameter->pic_init_qp;
821 if ( rate_control_mode != 2) {
823 if ( intra_mb_size > 384*8) //ONE MB raw data is 384 bytes
824 intra_mb_size = 384*8;
825 if ( inter_mb_size > 256*8)
826 inter_mb_size = 256*8; /* was intra_mb_size: clamp the *inter* budget here */
829 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
832 dri_bo_map(vme_context->vme_output.bo , 1);
833 msg = (unsigned int *)vme_context->vme_output.bo->virtual;
836 for (y = 0; y < height_in_mbs; y++) {
837 for (x = 0; x < width_in_mbs; x++) {
838 int last_mb = (y == (height_in_mbs-1)) && ( x == (width_in_mbs-1) );
840 if (emit_new_state) {
841 intel_batchbuffer_emit_mi_flush(batch);
843 if (IS_GEN7(i965->intel.device_id)) {
844 gen7_mfc_pipe_mode_select(ctx, MFX_FORMAT_AVC, gen6_encoder_context);
845 gen7_mfc_surface_state(ctx, gen6_encoder_context);
846 gen7_mfc_ind_obj_base_addr_state(ctx, gen6_encoder_context);
848 gen6_mfc_pipe_mode_select(ctx, gen6_encoder_context);
849 gen6_mfc_surface_state(ctx, gen6_encoder_context);
850 gen6_mfc_ind_obj_base_addr_state(ctx, gen6_encoder_context);
853 gen6_mfc_pipe_buf_addr_state(ctx, gen6_encoder_context);
854 gen6_mfc_bsp_buf_base_addr_state(ctx, gen6_encoder_context);
856 if (IS_GEN7(i965->intel.device_id)) {
857 gen7_mfc_avc_img_state(ctx, gen6_encoder_context);
858 gen7_mfc_avc_qm_state(ctx, gen6_encoder_context);
859 gen7_mfc_avc_fqm_state(ctx, gen6_encoder_context);
861 gen6_mfc_avc_img_state(ctx, gen6_encoder_context);
862 gen6_mfc_avc_qm_state(ctx, gen6_encoder_context);
863 gen6_mfc_avc_fqm_state(ctx, gen6_encoder_context);
866 gen6_mfc_avc_ref_idx_state(ctx, gen6_encoder_context);
867 gen6_mfc_avc_slice_state(ctx, pSliceParameter->slice_type, gen6_encoder_context, rate_control_mode == 0, qp);
873 object_len_in_bytes = gen6_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, gen6_encoder_context, intra_mb_size);
876 object_len_in_bytes = gen6_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp, offset, gen6_encoder_context, inter_mb_size);
880 if (intel_batchbuffer_check_free_space(batch, object_len_in_bytes) == 0) {
881 intel_batchbuffer_end_atomic(batch);
882 intel_batchbuffer_flush(batch);
884 intel_batchbuffer_start_atomic_bcs(batch, 0x1000);
890 dri_bo_unmap(vme_context->vme_output.bo);
892 intel_batchbuffer_end_atomic(batch);
/* Per-frame setup before PAK: binds the reconstructed surface (NV12,
 * allocated on demand) as post-deblocking output, caches its geometry in
 * mfc_context->surface_state, references all valid reference-frame BOs, the
 * current render target as the uncompressed source, and the VA coded buffer
 * as the PAK-BSE object (data starts after the VACodedBufferSegment header,
 * 64-byte aligned). Finishes by building the full batch via
 * gen6_mfc_avc_pipeline_programing().
 * NOTE(review): `i`/`bo` declarations, a `return vaStatus;` and several
 * closing braces are missing from this view (line-number gaps). */
895 static VAStatus gen6_mfc_avc_prepare(VADriverContextP ctx,
896 struct encode_state *encode_state,
897 struct gen6_encoder_context *gen6_encoder_context)
899 struct i965_driver_data *i965 = i965_driver_data(ctx);
900 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
901 struct object_surface *obj_surface;
902 struct object_buffer *obj_buffer;
904 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
905 VAStatus vaStatus = VA_STATUS_SUCCESS;
908 /*Setup all the input&output object*/
909 obj_surface = SURFACE(pPicParameter->CurrPic.picture_id);
911 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'));
913 mfc_context->post_deblocking_output.bo = obj_surface->bo;
914 dri_bo_reference(mfc_context->post_deblocking_output.bo);
916 mfc_context->surface_state.width = obj_surface->orig_width;
917 mfc_context->surface_state.height = obj_surface->orig_height;
918 mfc_context->surface_state.w_pitch = obj_surface->width;
919 mfc_context->surface_state.h_pitch = obj_surface->height;
921 for(i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++) {
922 if ( pPicParameter->ReferenceFrames[i].picture_id != VA_INVALID_ID ) {
923 obj_surface = SURFACE(pPicParameter->ReferenceFrames[i].picture_id);
925 if (obj_surface->bo != NULL) {
926 mfc_context->reference_surfaces[i].bo = obj_surface->bo;
927 dri_bo_reference(obj_surface->bo);
934 obj_surface = SURFACE(encode_state->current_render_target);
935 assert(obj_surface && obj_surface->bo);
936 mfc_context->uncompressed_picture_source.bo = obj_surface->bo;
937 dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
939 obj_buffer = BUFFER (pPicParameter->CodedBuf); /* FIXME: fix this later */
940 bo = obj_buffer->buffer_store->bo;
942 mfc_context->mfc_indirect_pak_bse_object.bo = bo;
943 mfc_context->mfc_indirect_pak_bse_object.offset = ALIGN(sizeof(VACodedBufferSegment), 64);
944 dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
946 /*Programing bcs pipeline*/
947 gen6_mfc_avc_pipeline_programing(ctx, encode_state, gen6_encoder_context); //filling the pipeline
952 static VAStatus gen6_mfc_run(VADriverContextP ctx,
953 struct encode_state *encode_state,
954 struct gen6_encoder_context *gen6_encoder_context)
956 struct intel_batchbuffer *batch = gen6_encoder_context->base.batch;
958 intel_batchbuffer_flush(batch); //run the pipeline
960 return VA_STATUS_SUCCESS;
/*
 * Post-run hook for a frame.  Currently it only passes the reconstructed
 * surface to my_debug() — presumably a debug dump helper; its definition
 * is not in this file, so confirm before relying on it.  No resources are
 * released here; teardown happens in gen6_mfc_context_destroy().
 */
963 static VAStatus gen6_mfc_stop(VADriverContextP ctx,
964 struct encode_state *encode_state,
965 struct gen6_encoder_context *gen6_encoder_context)
968 struct i965_driver_data *i965 = i965_driver_data(ctx);
969 struct gen6_mfc_context *mfc_context = &gen6_encoder_context->mfc_context;
971 VAEncPictureParameterBufferH264Ext *pPicParameter = (VAEncPictureParameterBufferH264Ext *)encode_state->pic_param_ext->buffer;
/* The commented alternatives below let a developer inspect a reference
 * frame or the raw input surface instead of the reconstructed picture. */
973 struct object_surface *obj_surface = SURFACE(pPicParameter->reconstructed_picture);
974 //struct object_surface *obj_surface = SURFACE(pPicParameter->reference_picture[0]);
975 //struct object_surface *obj_surface = SURFACE(encode_state->current_render_target);
976 my_debug(obj_surface);
980 return VA_STATUS_SUCCESS;
984 gen6_mfc_avc_encode_picture(VADriverContextP ctx,
985 struct encode_state *encode_state,
986 struct gen6_encoder_context *gen6_encoder_context)
988 gen6_mfc_init(ctx, gen6_encoder_context);
989 gen6_mfc_avc_prepare(ctx, encode_state, gen6_encoder_context);
990 gen6_mfc_run(ctx, encode_state, gen6_encoder_context);
991 gen6_mfc_stop(ctx, encode_state, gen6_encoder_context);
993 return VA_STATUS_SUCCESS;
/*
 * Public MFC dispatch: select the per-codec encode routine for the
 * requested profile.  Only H.264 Baseline is wired up on gen6; every
 * other profile yields VA_STATUS_ERROR_UNSUPPORTED_PROFILE.
 */
997 gen6_mfc_pipeline(VADriverContextP ctx,
999 struct encode_state *encode_state,
1000 struct gen6_encoder_context *gen6_encoder_context)
1005 case VAProfileH264Baseline:
1006 vaStatus = gen6_mfc_avc_encode_picture(ctx, encode_state, gen6_encoder_context);
1009 /* FIXME: add for other profile */
1011 vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
/*
 * One-time initialization of a gen6 MFC encoder context.
 * NOTE(review): per-frame buffer objects appear to be allocated elsewhere
 * (before each encode), not here — confirm against gen6_mfc_init().
 */
1018 Bool gen6_mfc_context_init(VADriverContextP ctx, struct gen6_mfc_context *mfc_context)
1023 Bool gen6_mfc_context_destroy(struct gen6_mfc_context *mfc_context)
1027 dri_bo_unreference(mfc_context->post_deblocking_output.bo);
1028 mfc_context->post_deblocking_output.bo = NULL;
1030 dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
1031 mfc_context->pre_deblocking_output.bo = NULL;
1033 dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
1034 mfc_context->uncompressed_picture_source.bo = NULL;
1036 dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
1037 mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
1039 for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
1040 dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
1041 mfc_context->direct_mv_buffers[i].bo = NULL;
1044 dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
1045 mfc_context->intra_row_store_scratch_buffer.bo = NULL;
1047 dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
1048 mfc_context->macroblock_status_buffer.bo = NULL;
1050 dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
1051 mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
1053 dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
1054 mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;