/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "vp9/common/vp9_loopfilter.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_reconinter.h"
#include "vpx_mem/vpx_mem.h"

#include "vp9/common/vp9_seg_common.h"

// 64 bit masks for left transform size. Each 1 represents a position where
// we should apply a loop filter across the left border of an 8x8 block
// boundary.
//
// In the case of TX_16X16 ->  ( low order byte first ) we end up with
// a mask that looks like this
//
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//    10101010
//
// A loopfilter should be applied to every other 8x8 horizontally.
static const uint64_t left_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffff,  // TX_4X4
  0xffffffffffffffff,  // TX_8x8
  0x5555555555555555,  // TX_16x16
  0x1111111111111111,  // TX_32x32
};

// 64 bit masks for above transform size. Each 1 represents a position where
// we should apply a loop filter across the top border of an 8x8 block
// boundary.
//
// In the case of TX_32x32 ->  ( low order byte first ) we end up with
// a mask that looks like this
//
//    11111111
//    00000000
//    00000000
//    00000000
//    11111111
//    00000000
//    00000000
//    00000000
//
// A loopfilter should be applied to every 4th row vertically.
static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
  0xffffffffffffffff,  // TX_4X4
  0xffffffffffffffff,  // TX_8x8
  0x00ff00ff00ff00ff,  // TX_16x16
  0x000000ff000000ff,  // TX_32x32
};

// 64 bit masks for prediction sizes (left). Each 1 represents a position
// where the left border of an 8x8 block lies. These are aligned to the
// right-most appropriate bit, and then shifted into place.
//
// In the case of BLOCK_16X32 ->  ( low order byte first ) we end up with
// a mask that looks like this :
//
//  10000000
//  10000000
//  10000000
//  10000000
//  00000000
//  00000000
//  00000000
//  00000000
static const uint64_t left_prediction_mask[BLOCK_SIZES] = {
  0x0000000000000001,  // BLOCK_4X4,
  0x0000000000000001,  // BLOCK_4X8,
  0x0000000000000001,  // BLOCK_8X4,
  0x0000000000000001,  // BLOCK_8X8,
  0x0000000000000101,  // BLOCK_8X16,
  0x0000000000000001,  // BLOCK_16X8,
  0x0000000000000101,  // BLOCK_16X16,
  0x0000000001010101,  // BLOCK_16X32,
  0x0000000000000101,  // BLOCK_32X16,
  0x0000000001010101,  // BLOCK_32X32,
  0x0101010101010101,  // BLOCK_32X64,
  0x0000000001010101,  // BLOCK_64X32,
  0x0101010101010101,  // BLOCK_64X64
};
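
// Editor's note (illustrative, not part of the original source): these
// patterns are ORed into the cumulative masks after being shifted by the
// block's 8x8 position inside the 64x64 area (see build_masks() below).
// For example, a BLOCK_16X32 whose top-left 8x8 cell sits at row 4,
// column 2 uses shift_y = 4 * 8 + 2 = 34, so
// left_prediction_mask[BLOCK_16X32] << 34 sets bits 34, 42, 50 and 58.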

// 64 bit mask to shift and set for each prediction size.
static const uint64_t above_prediction_mask[BLOCK_SIZES] = {
  0x0000000000000001,  // BLOCK_4X4
  0x0000000000000001,  // BLOCK_4X8
  0x0000000000000001,  // BLOCK_8X4
  0x0000000000000001,  // BLOCK_8X8
  0x0000000000000001,  // BLOCK_8X16,
  0x0000000000000003,  // BLOCK_16X8
  0x0000000000000003,  // BLOCK_16X16
  0x0000000000000003,  // BLOCK_16X32,
  0x000000000000000f,  // BLOCK_32X16,
  0x000000000000000f,  // BLOCK_32X32,
  0x000000000000000f,  // BLOCK_32X64,
  0x00000000000000ff,  // BLOCK_64X32,
  0x00000000000000ff,  // BLOCK_64X64
};

// 64 bit mask to shift and set for each prediction size. A bit is set for
// each 8x8 block that would be in the top left most block of the given block
// size in the 64x64 block.
static const uint64_t size_mask[BLOCK_SIZES] = {
  0x0000000000000001,  // BLOCK_4X4
  0x0000000000000001,  // BLOCK_4X8
  0x0000000000000001,  // BLOCK_8X4
  0x0000000000000001,  // BLOCK_8X8
  0x0000000000000101,  // BLOCK_8X16,
  0x0000000000000003,  // BLOCK_16X8
  0x0000000000000303,  // BLOCK_16X16
  0x0000000003030303,  // BLOCK_16X32,
  0x0000000000000f0f,  // BLOCK_32X16,
  0x000000000f0f0f0f,  // BLOCK_32X32,
  0x0f0f0f0f0f0f0f0f,  // BLOCK_32X64,
  0x00000000ffffffff,  // BLOCK_64X32,
  0xffffffffffffffff,  // BLOCK_64X64
};
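
// Editor's worked example (not in the original source):
// size_mask[BLOCK_16X16] = 0x0303 covers bits 0, 1, 8 and 9 -- the 2x2 group
// of 8x8 cells spanned by a 16x16 block at the top-left corner. ANDed with
// above_64x64_txform_mask[TX_16X16] (0x00ff00ff00ff00ff) it leaves 0x0003,
// i.e. only the top edge of that block contributes a 16x16 horizontal filter.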

// These are used for masking the left and above borders.
static const uint64_t left_border = 0x1111111111111111;
static const uint64_t above_border = 0x000000ff000000ff;

// 16 bit masks for uv transform sizes.
static const uint16_t left_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x5555,  // TX_16x16
  0x1111,  // TX_32x32
};

static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
  0xffff,  // TX_4X4
  0xffff,  // TX_8x8
  0x0f0f,  // TX_16x16
  0x000f,  // TX_32x32
};

// 16 bit left mask to shift and set for each uv prediction size.
static const uint16_t left_prediction_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4,
  0x0001,  // BLOCK_4X8,
  0x0001,  // BLOCK_8X4,
  0x0001,  // BLOCK_8X8,
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8,
  0x0001,  // BLOCK_16X16,
  0x0011,  // BLOCK_16X32,
  0x0001,  // BLOCK_32X16,
  0x0011,  // BLOCK_32X32,
  0x1111,  // BLOCK_32X64
  0x0011,  // BLOCK_64X32,
  0x1111,  // BLOCK_64X64
};

// 16 bit above mask to shift and set for each uv prediction size.
static const uint16_t above_prediction_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0001,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0003,  // BLOCK_32X32,
  0x0003,  // BLOCK_32X64,
  0x000f,  // BLOCK_64X32,
  0x000f,  // BLOCK_64X64
};

// 16 bit mask to shift and set for each uv prediction size.
static const uint16_t size_mask_uv[BLOCK_SIZES] = {
  0x0001,  // BLOCK_4X4
  0x0001,  // BLOCK_4X8
  0x0001,  // BLOCK_8X4
  0x0001,  // BLOCK_8X8
  0x0001,  // BLOCK_8X16,
  0x0001,  // BLOCK_16X8
  0x0001,  // BLOCK_16X16
  0x0011,  // BLOCK_16X32,
  0x0003,  // BLOCK_32X16,
  0x0033,  // BLOCK_32X32,
  0x3333,  // BLOCK_32X64,
  0x00ff,  // BLOCK_64X32,
  0xffff,  // BLOCK_64X64
};

static const uint16_t left_border_uv = 0x1111;
static const uint16_t above_border_uv = 0x000f;

static const int mode_lf_lut[MB_MODE_COUNT] = {
  0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  // INTRA_MODES
  1, 1, 0, 1                     // INTER_MODES (ZEROMV == 0)
};
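
// Editor's note (not in the original source): this table selects which
// mode_deltas entry applies when mode/ref deltas are enabled -- index 0 for
// all intra modes and ZEROMV, index 1 for the remaining inter modes
// (NEARESTMV, NEARMV, NEWMV).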

static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
  int lvl;

  // For each possible value for the loop filter fill out limits
  for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++) {
    // Set loop filter parameters that control sharpness.
    int block_inside_limit = lvl >> ((sharpness_lvl > 0) + (sharpness_lvl > 4));

    if (sharpness_lvl > 0) {
      if (block_inside_limit > 9 - sharpness_lvl)
        block_inside_limit = 9 - sharpness_lvl;
    }

    if (block_inside_limit < 1)
      block_inside_limit = 1;

    vpx_memset(lfi->lfthr[lvl].lim, block_inside_limit, SIMD_WIDTH);
    vpx_memset(lfi->lfthr[lvl].mblim, (2 * (lvl + 2) + block_inside_limit),
               SIMD_WIDTH);
  }
}
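
// Editor's worked example (derived from the code above, not in the original
// source): for lvl = 32 and sharpness_lvl = 5, block_inside_limit =
// 32 >> (1 + 1) = 8, then capped at 9 - 5 = 4, so lim = 4 and
// mblim = 2 * (32 + 2) + 4 = 72.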

static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
                                const MB_MODE_INFO *mbmi) {
  return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
                   [mode_lf_lut[mbmi->mode]];
}

void vp9_loop_filter_init(VP9_COMMON *cm) {
  loop_filter_info_n *lfi = &cm->lf_info;
  struct loopfilter *lf = &cm->lf;
  int lvl;

  // init limits for given sharpness
  update_sharpness(lfi, lf->sharpness_level);
  lf->last_sharpness_level = lf->sharpness_level;

  // init hev threshold const vectors
  for (lvl = 0; lvl <= MAX_LOOP_FILTER; lvl++)
    vpx_memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
}

void vp9_loop_filter_frame_init(VP9_COMMON *cm, int default_filt_lvl) {
  int seg_id;
  // n_shift is the multiplier for lf_deltas
  // the multiplier is 1 for when filter_lvl is between 0 and 31;
  // 2 when filter_lvl is between 32 and 63
  const int scale = 1 << (default_filt_lvl >> 5);
  loop_filter_info_n *const lfi = &cm->lf_info;
  struct loopfilter *const lf = &cm->lf;
  const struct segmentation *const seg = &cm->seg;

  // update limits if sharpness has changed
  if (lf->last_sharpness_level != lf->sharpness_level) {
    update_sharpness(lfi, lf->sharpness_level);
    lf->last_sharpness_level = lf->sharpness_level;
  }

  for (seg_id = 0; seg_id < MAX_SEGMENTS; seg_id++) {
    int lvl_seg = default_filt_lvl;
    if (vp9_segfeature_active(seg, seg_id, SEG_LVL_ALT_LF)) {
      const int data = vp9_get_segdata(seg, seg_id, SEG_LVL_ALT_LF);
      lvl_seg = clamp(seg->abs_delta == SEGMENT_ABSDATA ?
                      data : default_filt_lvl + data,
                      0, MAX_LOOP_FILTER);
    }

    if (!lf->mode_ref_delta_enabled) {
      // we could get rid of this if we assume that deltas are set to
      // zero when not in use; encoder always uses deltas
      vpx_memset(lfi->lvl[seg_id], lvl_seg, sizeof(lfi->lvl[seg_id]));
    } else {
      int ref, mode;
      const int intra_lvl = lvl_seg + lf->ref_deltas[INTRA_FRAME] * scale;
      lfi->lvl[seg_id][INTRA_FRAME][0] = clamp(intra_lvl, 0, MAX_LOOP_FILTER);

      for (ref = LAST_FRAME; ref < MAX_REF_FRAMES; ++ref) {
        for (mode = 0; mode < MAX_MODE_LF_DELTAS; ++mode) {
          const int inter_lvl = lvl_seg + lf->ref_deltas[ref] * scale
                                        + lf->mode_deltas[mode] * scale;
          lfi->lvl[seg_id][ref][mode] = clamp(inter_lvl, 0, MAX_LOOP_FILTER);
        }
      }
    }
  }
}
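
// Editor's note (illustrative, not in the original source): with
// default_filt_lvl = 40 the scale above is 1 << (40 >> 5) = 2, so a
// ref_delta of -2 shifts that reference frame's level down by 4 before
// clamping to [0, MAX_LOOP_FILTER].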

static void filter_selectively_vert_row2(PLANE_TYPE plane_type,
                                         uint8_t *s, int pitch,
                                         unsigned int mask_16x16_l,
                                         unsigned int mask_8x8_l,
                                         unsigned int mask_4x4_l,
                                         unsigned int mask_4x4_int_l,
                                         const loop_filter_info_n *lfi_n,
                                         const uint8_t *lfl) {
  const int mask_shift = plane_type ? 4 : 8;
  const int mask_cutoff = plane_type ? 0xf : 0xff;
  const int lfl_forward = plane_type ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    // TODO(yunqingwang): count in loopfilter functions should be removed.
    if ((mask_16x16_0 | mask_16x16_1) & 1) {
      if ((mask_16x16_0 & mask_16x16_1) & 1) {
        vp9_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                 lfi0->hev_thr);
      } else if (mask_16x16_0 & 1) {
        vp9_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                            lfi0->hev_thr);
      } else {
        vp9_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                            lfi1->lim, lfi1->hev_thr);
      }
    }

    if ((mask_8x8_0 | mask_8x8_1) & 1) {
      if ((mask_8x8_0 & mask_8x8_1) & 1) {
        vp9_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                lfi1->hev_thr);
      } else if (mask_8x8_0 & 1) {
        vp9_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
                           1);
      } else {
        vp9_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                           lfi1->hev_thr, 1);
      }
    }

    if ((mask_4x4_0 | mask_4x4_1) & 1) {
      if ((mask_4x4_0 & mask_4x4_1) & 1) {
        vp9_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                lfi1->hev_thr);
      } else if (mask_4x4_0 & 1) {
        vp9_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim, lfi0->hev_thr,
                           1);
      } else {
        vp9_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim, lfi1->lim,
                           lfi1->hev_thr, 1);
      }
    }

    if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
      if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
        vp9_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                lfi1->hev_thr);
      } else if (mask_4x4_int_0 & 1) {
        vp9_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                           lfi0->hev_thr, 1);
      } else {
        vp9_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim, lfi1->lim,
                           lfi1->hev_thr, 1);
      }
    }

    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}
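
// Editor's note (not in the original source): the _0/_1 mask pairs let one
// call to the function above filter two 8x8 rows of the superblock per pass.
// For the y plane the second row's bits live 8 positions higher in each mask
// (mask_shift == 8) and its filter levels 8 entries further into lfl
// (lfl_forward == 8); s + 8 * pitch addresses the same column 8 pixels down.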

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_vert_row2(PLANE_TYPE plane_type,
                                                uint16_t *s, int pitch,
                                                unsigned int mask_16x16_l,
                                                unsigned int mask_8x8_l,
                                                unsigned int mask_4x4_l,
                                                unsigned int mask_4x4_int_l,
                                                const loop_filter_info_n *lfi_n,
                                                const uint8_t *lfl, int bd) {
  const int mask_shift = plane_type ? 4 : 8;
  const int mask_cutoff = plane_type ? 0xf : 0xff;
  const int lfl_forward = plane_type ? 4 : 8;

  unsigned int mask_16x16_0 = mask_16x16_l & mask_cutoff;
  unsigned int mask_8x8_0 = mask_8x8_l & mask_cutoff;
  unsigned int mask_4x4_0 = mask_4x4_l & mask_cutoff;
  unsigned int mask_4x4_int_0 = mask_4x4_int_l & mask_cutoff;
  unsigned int mask_16x16_1 = (mask_16x16_l >> mask_shift) & mask_cutoff;
  unsigned int mask_8x8_1 = (mask_8x8_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_1 = (mask_4x4_l >> mask_shift) & mask_cutoff;
  unsigned int mask_4x4_int_1 = (mask_4x4_int_l >> mask_shift) & mask_cutoff;
  unsigned int mask;

  for (mask = mask_16x16_0 | mask_8x8_0 | mask_4x4_0 | mask_4x4_int_0 |
              mask_16x16_1 | mask_8x8_1 | mask_4x4_1 | mask_4x4_int_1;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi0 = lfi_n->lfthr + *lfl;
    const loop_filter_thresh *lfi1 = lfi_n->lfthr + *(lfl + lfl_forward);

    // TODO(yunqingwang): count in loopfilter functions should be removed.
    if ((mask_16x16_0 | mask_16x16_1) & 1) {
      if ((mask_16x16_0 & mask_16x16_1) & 1) {
        vp9_highbd_lpf_vertical_16_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                        lfi0->hev_thr, bd);
      } else if (mask_16x16_0 & 1) {
        vp9_highbd_lpf_vertical_16(s, pitch, lfi0->mblim, lfi0->lim,
                                   lfi0->hev_thr, bd);
      } else {
        vp9_highbd_lpf_vertical_16(s + 8 * pitch, pitch, lfi1->mblim,
                                   lfi1->lim, lfi1->hev_thr, bd);
      }
    }

    if ((mask_8x8_0 | mask_8x8_1) & 1) {
      if ((mask_8x8_0 & mask_8x8_1) & 1) {
        vp9_highbd_lpf_vertical_8_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                       lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                       lfi1->hev_thr, bd);
      } else if (mask_8x8_0 & 1) {
        vp9_highbd_lpf_vertical_8(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, 1, bd);
      } else {
        vp9_highbd_lpf_vertical_8(s + 8 * pitch, pitch, lfi1->mblim,
                                  lfi1->lim, lfi1->hev_thr, 1, bd);
      }
    }

    if ((mask_4x4_0 | mask_4x4_1) & 1) {
      if ((mask_4x4_0 & mask_4x4_1) & 1) {
        vp9_highbd_lpf_vertical_4_dual(s, pitch, lfi0->mblim, lfi0->lim,
                                       lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                       lfi1->hev_thr, bd);
      } else if (mask_4x4_0 & 1) {
        vp9_highbd_lpf_vertical_4(s, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, 1, bd);
      } else {
        vp9_highbd_lpf_vertical_4(s + 8 * pitch, pitch, lfi1->mblim,
                                  lfi1->lim, lfi1->hev_thr, 1, bd);
      }
    }

    if ((mask_4x4_int_0 | mask_4x4_int_1) & 1) {
      if ((mask_4x4_int_0 & mask_4x4_int_1) & 1) {
        vp9_highbd_lpf_vertical_4_dual(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                       lfi0->hev_thr, lfi1->mblim, lfi1->lim,
                                       lfi1->hev_thr, bd);
      } else if (mask_4x4_int_0 & 1) {
        vp9_highbd_lpf_vertical_4(s + 4, pitch, lfi0->mblim, lfi0->lim,
                                  lfi0->hev_thr, 1, bd);
      } else {
        vp9_highbd_lpf_vertical_4(s + 8 * pitch + 4, pitch, lfi1->mblim,
                                  lfi1->lim, lfi1->hev_thr, 1, bd);
      }
    }

    s += 8;
    lfl += 1;
    mask_16x16_0 >>= 1;
    mask_8x8_0 >>= 1;
    mask_4x4_0 >>= 1;
    mask_4x4_int_0 >>= 1;
    mask_16x16_1 >>= 1;
    mask_8x8_1 >>= 1;
    mask_4x4_1 >>= 1;
    mask_4x4_int_1 >>= 1;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void filter_selectively_horiz(uint8_t *s, int pitch,
                                     unsigned int mask_16x16,
                                     unsigned int mask_8x8,
                                     unsigned int mask_4x4,
                                     unsigned int mask_4x4_int,
                                     const loop_filter_info_n *lfi_n,
                                     const uint8_t *lfl) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= count) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 2);
          count = 2;
        } else {
          vp9_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 1);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vp9_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);

          if ((mask_4x4_int & 3) == 3) {
            vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, 1);
            else if (mask_4x4_int & 2)
              vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr, 1);
          }
          count = 2;
        } else {
          vp9_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

          if (mask_4x4_int & 1)
            vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, 1);
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vp9_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, lfin->mblim, lfin->lim,
                                    lfin->hev_thr);
          if ((mask_4x4_int & 3) == 3) {
            vp9_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                      lfi->lim, lfi->hev_thr, lfin->mblim,
                                      lfin->lim, lfin->hev_thr);
          } else {
            if (mask_4x4_int & 1)
              vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, 1);
            else if (mask_4x4_int & 2)
              vp9_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                   lfin->lim, lfin->hev_thr, 1);
          }
          count = 2;
        } else {
          vp9_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);

          if (mask_4x4_int & 1)
            vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                 lfi->hev_thr, 1);
        }
      } else if (mask_4x4_int & 1) {
        vp9_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                             lfi->hev_thr, 1);
      }
    }
    s += 8 * count;
    lfl += count;
    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}
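
// Editor's note (not in the original source): in the function above, count
// becomes 2 whenever a *_dual call covered two adjacent 8x8 columns at once,
// so the pixel pointer, lfl pointer and all four masks advance by two
// positions per iteration in that case.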

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_horiz(uint16_t *s, int pitch,
                                            unsigned int mask_16x16,
                                            unsigned int mask_8x8,
                                            unsigned int mask_4x4,
                                            unsigned int mask_4x4_int,
                                            const loop_filter_info_n *lfi_n,
                                            const uint8_t *lfl, int bd) {
  unsigned int mask;
  int count;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= count) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    count = 1;
    if (mask & 1) {
      if (mask_16x16 & 1) {
        if ((mask_16x16 & 3) == 3) {
          vp9_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, 2, bd);
          count = 2;
        } else {
          vp9_highbd_lpf_horizontal_16(s, pitch, lfi->mblim, lfi->lim,
                                       lfi->hev_thr, 1, bd);
        }
      } else if (mask_8x8 & 1) {
        if ((mask_8x8 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vp9_highbd_lpf_horizontal_8_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);

          if ((mask_4x4_int & 3) == 3) {
            vp9_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                             lfi->lim, lfi->hev_thr,
                                             lfin->mblim, lfin->lim,
                                             lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              vp9_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, 1, bd);
            } else if (mask_4x4_int & 2) {
              vp9_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, 1, bd);
            }
          }
          count = 2;
        } else {
          vp9_highbd_lpf_horizontal_8(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, 1, bd);

          if (mask_4x4_int & 1) {
            vp9_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, 1, bd);
          }
        }
      } else if (mask_4x4 & 1) {
        if ((mask_4x4 & 3) == 3) {
          // Next block's thresholds.
          const loop_filter_thresh *lfin = lfi_n->lfthr + *(lfl + 1);

          vp9_highbd_lpf_horizontal_4_dual(s, pitch, lfi->mblim, lfi->lim,
                                           lfi->hev_thr, lfin->mblim,
                                           lfin->lim, lfin->hev_thr, bd);
          if ((mask_4x4_int & 3) == 3) {
            vp9_highbd_lpf_horizontal_4_dual(s + 4 * pitch, pitch, lfi->mblim,
                                             lfi->lim, lfi->hev_thr,
                                             lfin->mblim, lfin->lim,
                                             lfin->hev_thr, bd);
          } else {
            if (mask_4x4_int & 1) {
              vp9_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                          lfi->lim, lfi->hev_thr, 1, bd);
            } else if (mask_4x4_int & 2) {
              vp9_highbd_lpf_horizontal_4(s + 8 + 4 * pitch, pitch, lfin->mblim,
                                          lfin->lim, lfin->hev_thr, 1, bd);
            }
          }
          count = 2;
        } else {
          vp9_highbd_lpf_horizontal_4(s, pitch, lfi->mblim, lfi->lim,
                                      lfi->hev_thr, 1, bd);

          if (mask_4x4_int & 1) {
            vp9_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim,
                                        lfi->lim, lfi->hev_thr, 1, bd);
          }
        }
      } else if (mask_4x4_int & 1) {
        vp9_highbd_lpf_horizontal_4(s + 4 * pitch, pitch, lfi->mblim, lfi->lim,
                                    lfi->hev_thr, 1, bd);
      }
    }
    s += 8 * count;
    lfl += count;
    mask_16x16 >>= count;
    mask_8x8 >>= count;
    mask_4x4 >>= count;
    mask_4x4_int >>= count;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

// This function ORs into the current lfm structure the positions at which
// loop filters should be applied for the specific mi we are looking at. It
// uses information including the block_size_type (32x16, 32x32, etc.), the
// transform size, whether there were any coefficients encoded, and the loop
// filter strength of the block we are currently looking at. Shift is used to
// position the 1's we produce.
// TODO(JBB): Need another function for different resolution color..
static void build_masks(const loop_filter_info_n *const lfi_n,
                        const MODE_INFO *mi, const int shift_y,
                        const int shift_uv,
                        LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
  const int filter_level = get_filter_level(lfi_n, mbmi);
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
  uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
  uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
  int i;

  // If filter level is 0 we don't loop filter.
  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      vpx_memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  // These set 1 in the current block size for the block size edges.
  // For instance if the block size is 32x16, we'll set:
  //    above =   1111
  //              0000
  //    and
  //    left  =   1000
  //              1000
  //
  // NOTE: in this example the low bit is left-most, so ( 1000 ) is stored
  //       as 1, not 8.
  //
  // U and V set things on a 16 bit scale.
  //
  *above_y |= above_prediction_mask[block_size] << shift_y;
  *above_uv |= above_prediction_mask_uv[block_size] << shift_uv;
  *left_y |= left_prediction_mask[block_size] << shift_y;
  *left_uv |= left_prediction_mask_uv[block_size] << shift_uv;

  // If the block has no coefficients and is not intra we skip applying
  // the loop filter on block edges.
  if (mbmi->skip && is_inter_block(mbmi))
    return;

  // Here we are adding a mask for the transform size. The transform
  // size mask is set to be correct for a 64x64 prediction block size. We
  // mask to match the size of the block we are working on and then shift it
  // into place.
  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;
  *above_uv |= (size_mask_uv[block_size] &
                above_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;
  *left_uv |= (size_mask_uv[block_size] &
               left_64x64_txform_mask_uv[tx_size_uv]) << shift_uv;

  // Here we are trying to determine what to do with the internal 4x4 block
  // boundaries. These differ from the 4x4 boundaries on the outside edge of
  // an 8x8 in that the internal ones can be skipped and don't depend on
  // the prediction block size.
  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;

  if (tx_size_uv == TX_4X4)
    *int_4x4_uv |= (size_mask_uv[block_size] & 0xffff) << shift_uv;
}
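
// Editor's worked example (not in the original source): a BLOCK_16X16 with
// tx_size_y == TX_8X8 at shift_y == 0 first ORs left_prediction_mask
// (0x0101, bits 0 and 8) into left_y[TX_8X8]; the transform pass then ORs in
// size_mask[BLOCK_16X16] & left_64x64_txform_mask[TX_8X8] == 0x0303, so every
// 8x8 left edge inside the 16x16 ends up set in left_y[TX_8X8].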

// This function does the same thing as the one above with the exception that
// it only affects the y masks. It exists because for blocks < 16x16 in size,
// we only update u and v masks on the first block.
static void build_y_mask(const loop_filter_info_n *const lfi_n,
                         const MODE_INFO *mi, const int shift_y,
                         LOOP_FILTER_MASK *lfm) {
  const MB_MODE_INFO *mbmi = &mi->mbmi;
  const BLOCK_SIZE block_size = mbmi->sb_type;
  const TX_SIZE tx_size_y = mbmi->tx_size;
  const int filter_level = get_filter_level(lfi_n, mbmi);
  uint64_t *const left_y = &lfm->left_y[tx_size_y];
  uint64_t *const above_y = &lfm->above_y[tx_size_y];
  uint64_t *const int_4x4_y = &lfm->int_4x4_y;
  int i;

  if (!filter_level) {
    return;
  } else {
    const int w = num_8x8_blocks_wide_lookup[block_size];
    const int h = num_8x8_blocks_high_lookup[block_size];
    int index = shift_y;
    for (i = 0; i < h; i++) {
      vpx_memset(&lfm->lfl_y[index], filter_level, w);
      index += 8;
    }
  }

  *above_y |= above_prediction_mask[block_size] << shift_y;
  *left_y |= left_prediction_mask[block_size] << shift_y;

  if (mbmi->skip && is_inter_block(mbmi))
    return;

  *above_y |= (size_mask[block_size] &
               above_64x64_txform_mask[tx_size_y]) << shift_y;

  *left_y |= (size_mask[block_size] &
              left_64x64_txform_mask[tx_size_y]) << shift_y;

  if (tx_size_y == TX_4X4)
    *int_4x4_y |= (size_mask[block_size] & 0xffffffffffffffff) << shift_y;
}

// This function sets up the bit masks for the entire 64x64 region represented
// by mi_row, mi_col.
// TODO(JBB): This function only works for yv12.
void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
                    MODE_INFO *mi, const int mode_info_stride,
                    LOOP_FILTER_MASK *lfm) {
  int idx_32, idx_16, idx_8;
  const loop_filter_info_n *const lfi_n = &cm->lf_info;
  MODE_INFO *mip = mi;
  MODE_INFO *mip2 = mi;

  // These are offsets to the next mi in the 64x64 block. It is what gets
  // added to the mi ptr as we go through each loop. It helps us to avoid
  // setting up special row and column counters for each index. The last step
  // brings us out back to the starting position.
  const int offset_32[] = {4, (mode_info_stride << 2) - 4, 4,
                           -(mode_info_stride << 2) - 4};
  const int offset_16[] = {2, (mode_info_stride << 1) - 2, 2,
                           -(mode_info_stride << 1) - 2};
  const int offset[] = {1, mode_info_stride - 1, 1, -mode_info_stride - 1};

  // Following variables represent shifts to position the current block
  // mask over the appropriate block. A shift of 36 to the left will move
  // the bits for the final 32 by 32 block in the 64x64 down 4 rows and
  // right 4 columns to the appropriate spot.
  const int shift_32_y[] = {0, 4, 32, 36};
  const int shift_16_y[] = {0, 2, 16, 18};
  const int shift_8_y[] = {0, 1, 8, 9};
  const int shift_32_uv[] = {0, 2, 8, 10};
  const int shift_16_uv[] = {0, 1, 4, 5};
  int i;
  const int max_rows = (mi_row + MI_BLOCK_SIZE > cm->mi_rows ?
                        cm->mi_rows - mi_row : MI_BLOCK_SIZE);
  const int max_cols = (mi_col + MI_BLOCK_SIZE > cm->mi_cols ?
                        cm->mi_cols - mi_col : MI_BLOCK_SIZE);

  vp9_zero(*lfm);
  assert(mip != NULL);
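
  // Editor's worked example (not in the original source): idx_32 == 3 is the
  // bottom-right 32x32 quadrant. shift_32_y[3] == 36 == 4 * 8 + 4, i.e. 4
  // rows down and 4 columns right in the y plane's 8x8 grid; the uv grid is
  // 4x4, so shift_32_uv[3] == 10 == 2 * 4 + 2.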

  // TODO(jimbankoski): Try moving most of the following code into decode
  // loop and storing lfm in the mbmi structure so that we don't have to go
  // through the recursive loop structure multiple times.
  switch (mip->mbmi.sb_type) {
    case BLOCK_64X64:
      build_masks(lfi_n, mip, 0, 0, lfm);
      break;
    case BLOCK_64X32:
      build_masks(lfi_n, mip, 0, 0, lfm);
      mip2 = mip + mode_info_stride * 4;
      if (4 >= max_rows)
        break;
      build_masks(lfi_n, mip2, 32, 8, lfm);
      break;
    case BLOCK_32X64:
      build_masks(lfi_n, mip, 0, 0, lfm);
      mip2 = mip + 4;
      if (4 >= max_cols)
        break;
      build_masks(lfi_n, mip2, 4, 2, lfm);
      break;
    default:
      for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
        const int shift_y = shift_32_y[idx_32];
        const int shift_uv = shift_32_uv[idx_32];
        const int mi_32_col_offset = ((idx_32 & 1) << 2);
        const int mi_32_row_offset = ((idx_32 >> 1) << 2);
        if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
          continue;
        switch (mip->mbmi.sb_type) {
          case BLOCK_32X32:
            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
            break;
          case BLOCK_32X16:
            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
            if (mi_32_row_offset + 2 >= max_rows)
              continue;
            mip2 = mip + mode_info_stride * 2;
            build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm);
            break;
          case BLOCK_16X32:
            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
            if (mi_32_col_offset + 2 >= max_cols)
              continue;
            mip2 = mip + 2;
            build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm);
            break;
          default:
            for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
              const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
              const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
              const int mi_16_col_offset = mi_32_col_offset +
                  ((idx_16 & 1) << 1);
              const int mi_16_row_offset = mi_32_row_offset +
                  ((idx_16 >> 1) << 1);

              if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
                continue;

              switch (mip->mbmi.sb_type) {
                case BLOCK_16X16:
                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
                  break;
                case BLOCK_16X8:
                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
                  if (mi_16_row_offset + 1 >= max_rows)
                    continue;
                  mip2 = mip + mode_info_stride;
                  build_y_mask(lfi_n, mip2, shift_y + 8, lfm);
                  break;
                case BLOCK_8X16:
                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
                  if (mi_16_col_offset + 1 >= max_cols)
                    continue;
                  mip2 = mip + 1;
                  build_y_mask(lfi_n, mip2, shift_y + 1, lfm);
                  break;
                default: {
                  const int shift_y = shift_32_y[idx_32] +
                                      shift_16_y[idx_16] +
                                      shift_8_y[0];
                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
                  mip += offset[0];
                  for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
                    const int shift_y = shift_32_y[idx_32] +
                                        shift_16_y[idx_16] +
                                        shift_8_y[idx_8];
                    const int mi_8_col_offset = mi_16_col_offset +
                        ((idx_8 & 1));
                    const int mi_8_row_offset = mi_16_row_offset +
                        ((idx_8 >> 1));

                    if (mi_8_col_offset >= max_cols ||
                        mi_8_row_offset >= max_rows)
                      continue;
                    build_y_mask(lfi_n, mip, shift_y, lfm);
                  }
                  break;
                }
              }
            }
            break;
        }
      }
      break;
  }

  // The largest loopfilter we have is 16x16 so we use the 16x16 mask
  // for 32x32 transforms also.
  lfm->left_y[TX_16X16] |= lfm->left_y[TX_32X32];
  lfm->above_y[TX_16X16] |= lfm->above_y[TX_32X32];
  lfm->left_uv[TX_16X16] |= lfm->left_uv[TX_32X32];
  lfm->above_uv[TX_16X16] |= lfm->above_uv[TX_32X32];

  // We do at least 8 tap filter on every 32x32 even if the transform size
  // is 4x4. So if the 4x4 is set on a border pixel add it to the 8x8 and
  // remove it from the 4x4.
  lfm->left_y[TX_8X8] |= lfm->left_y[TX_4X4] & left_border;
  lfm->left_y[TX_4X4] &= ~left_border;
  lfm->above_y[TX_8X8] |= lfm->above_y[TX_4X4] & above_border;
  lfm->above_y[TX_4X4] &= ~above_border;
  lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_4X4] & left_border_uv;
  lfm->left_uv[TX_4X4] &= ~left_border_uv;
  lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_4X4] & above_border_uv;
  lfm->above_uv[TX_4X4] &= ~above_border_uv;

  // We do some special edge handling.
  if (mi_row + MI_BLOCK_SIZE > cm->mi_rows) {
    const uint64_t rows = cm->mi_rows - mi_row;

    // Each pixel inside the border gets a 1,
    const uint64_t mask_y = (((uint64_t) 1 << (rows << 3)) - 1);
    const uint16_t mask_uv = (((uint16_t) 1 << (((rows + 1) >> 1) << 2)) - 1);

    // Remove values completely outside our border.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->int_4x4_uv &= mask_uv;

    // We don't apply a wide loop filter on the last uv block row. If set
    // apply the shorter one instead.
    if (rows == 1) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16];
      lfm->above_uv[TX_16X16] = 0;
    }
    if (rows == 5) {
      lfm->above_uv[TX_8X8] |= lfm->above_uv[TX_16X16] & 0xff00;
      lfm->above_uv[TX_16X16] &= ~(lfm->above_uv[TX_16X16] & 0xff00);
    }
  }
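
  // Editor's worked example (not in the original source): when the 64x64
  // extends past the bottom of the frame with rows == 4, mask_y ==
  // (1 << 32) - 1 keeps only the top four 8x8 rows and mask_uv ==
  // (1 << 8) - 1 keeps the top two uv rows; bits below the frame edge are
  // cleared.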

  if (mi_col + MI_BLOCK_SIZE > cm->mi_cols) {
    const uint64_t columns = cm->mi_cols - mi_col;

    // Each pixel inside the border gets a 1, the multiply copies the border
    // to where we need it.
    const uint64_t mask_y = (((1 << columns) - 1)) * 0x0101010101010101;
    const uint16_t mask_uv = ((1 << ((columns + 1) >> 1)) - 1) * 0x1111;

    // Internal edges are not applied on the last column of the image so
    // we mask 1 more for the internal edges
    const uint16_t mask_uv_int = ((1 << (columns >> 1)) - 1) * 0x1111;

    // Remove the bits outside the image edge.
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= mask_y;
      lfm->above_y[i] &= mask_y;
      lfm->left_uv[i] &= mask_uv;
      lfm->above_uv[i] &= mask_uv;
    }
    lfm->int_4x4_y &= mask_y;
    lfm->int_4x4_uv &= mask_uv_int;

    // We don't apply a wide loop filter on the last uv column. If set
    // apply the shorter one instead.
    if (columns == 1) {
      lfm->left_uv[TX_8X8] |= lfm->left_uv[TX_16X16];
      lfm->left_uv[TX_16X16] = 0;
    }
    if (columns == 5) {
      lfm->left_uv[TX_8X8] |= (lfm->left_uv[TX_16X16] & 0xcccc);
      lfm->left_uv[TX_16X16] &= ~(lfm->left_uv[TX_16X16] & 0xcccc);
    }
  }

  // We don't apply a loop filter on the first column in the image, mask that
  // out.
  if (mi_col == 0) {
    for (i = 0; i < TX_32X32; i++) {
      lfm->left_y[i] &= 0xfefefefefefefefe;
      lfm->left_uv[i] &= 0xeeee;
    }
  }

  // Assert if we try to apply 2 different loop filters at the same position.
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_8X8]));
  assert(!(lfm->left_y[TX_16X16] & lfm->left_y[TX_4X4]));
  assert(!(lfm->left_y[TX_8X8] & lfm->left_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->left_y[TX_16X16]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_8X8]));
  assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
  assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
  assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
  assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
  assert(!(lfm->int_4x4_y & lfm->above_y[TX_16X16]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
  assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
  assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16]));
}

static void filter_selectively_vert(uint8_t *s, int pitch,
                                    unsigned int mask_16x16,
                                    unsigned int mask_8x8,
                                    unsigned int mask_4x4,
                                    unsigned int mask_4x4_int,
                                    const loop_filter_info_n *lfi_n,
                                    const uint8_t *lfl) {
  unsigned int mask;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
        vp9_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr);
      } else if (mask_8x8 & 1) {
        vp9_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
      } else if (mask_4x4 & 1) {
        vp9_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
      }
    }
    if (mask_4x4_int & 1)
      vp9_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim, lfi->hev_thr, 1);
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}
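
// Editor's note (not in the original source): unlike
// filter_selectively_vert_row2() above, this variant walks a single row of
// 8x8 edges at a time; it is the path used by filter_block_plane_non420()
// below for frames whose chroma is not 4:2:0 subsampled.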

#if CONFIG_VP9_HIGHBITDEPTH
static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
                                           unsigned int mask_16x16,
                                           unsigned int mask_8x8,
                                           unsigned int mask_4x4,
                                           unsigned int mask_4x4_int,
                                           const loop_filter_info_n *lfi_n,
                                           const uint8_t *lfl, int bd) {
  unsigned int mask;

  for (mask = mask_16x16 | mask_8x8 | mask_4x4 | mask_4x4_int;
       mask; mask >>= 1) {
    const loop_filter_thresh *lfi = lfi_n->lfthr + *lfl;

    if (mask & 1) {
      if (mask_16x16 & 1) {
        vp9_highbd_lpf_vertical_16(s, pitch, lfi->mblim, lfi->lim,
                                   lfi->hev_thr, bd);
      } else if (mask_8x8 & 1) {
        vp9_highbd_lpf_vertical_8(s, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr, 1, bd);
      } else if (mask_4x4 & 1) {
        vp9_highbd_lpf_vertical_4(s, pitch, lfi->mblim, lfi->lim,
                                  lfi->hev_thr, 1, bd);
      }
    }
    if (mask_4x4_int & 1)
      vp9_highbd_lpf_vertical_4(s + 4, pitch, lfi->mblim, lfi->lim,
                                lfi->hev_thr, 1, bd);
    s += 8;
    lfl += 1;
    mask_16x16 >>= 1;
    mask_8x8 >>= 1;
    mask_4x4 >>= 1;
    mask_4x4_int >>= 1;
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void filter_block_plane_non420(VP9_COMMON *cm,
                                      struct macroblockd_plane *plane,
                                      MODE_INFO *mi_8x8,
                                      int mi_row, int mi_col) {
  const int ss_x = plane->subsampling_x;
  const int ss_y = plane->subsampling_y;
  const int row_step = 1 << ss_y;
  const int col_step = 1 << ss_x;
  const int row_step_stride = cm->mi_stride * row_step;
  struct buf_2d *const dst = &plane->dst;
  uint8_t *const dst0 = dst->buf;
  unsigned int mask_16x16[MI_BLOCK_SIZE] = {0};
  unsigned int mask_8x8[MI_BLOCK_SIZE] = {0};
  unsigned int mask_4x4[MI_BLOCK_SIZE] = {0};
  unsigned int mask_4x4_int[MI_BLOCK_SIZE] = {0};
  uint8_t lfl[MI_BLOCK_SIZE * MI_BLOCK_SIZE];
  int r, c;

  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
    unsigned int mask_16x16_c = 0;
    unsigned int mask_8x8_c = 0;
    unsigned int mask_4x4_c = 0;
    unsigned int border_mask;

    // Determine the vertical edges that need filtering
    for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
      const MODE_INFO *mi = mi_8x8[c].src_mi;
      const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
      const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
      // left edge of current unit is block/partition edge -> no skip
      const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
          !(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
      const int skip_this_c = skip_this && !block_edge_left;
      // top edge of current unit is block/partition edge -> no skip
      const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
          !(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
      const int skip_this_r = skip_this && !block_edge_above;
      const TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
                            ? get_uv_tx_size(&mi[0].mbmi, plane)
                            : mi[0].mbmi.tx_size;
      const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
      const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;

      // Filter level can vary per MI
      if (!(lfl[(r << 3) + (c >> ss_x)] =
            get_filter_level(&cm->lf_info, &mi[0].mbmi)))
        continue;

      // Build masks based on the transform size of each block
      if (tx_size == TX_32X32) {
        if (!skip_this_c && ((c >> ss_x) & 3) == 0) {
          if (!skip_border_4x4_c)
            mask_16x16_c |= 1 << (c >> ss_x);
          else
            mask_8x8_c |= 1 << (c >> ss_x);
        }
        if (!skip_this_r && ((r >> ss_y) & 3) == 0) {
          if (!skip_border_4x4_r)
            mask_16x16[r] |= 1 << (c >> ss_x);
          else
            mask_8x8[r] |= 1 << (c >> ss_x);
        }
      } else if (tx_size == TX_16X16) {
        if (!skip_this_c && ((c >> ss_x) & 1) == 0) {
          if (!skip_border_4x4_c)
            mask_16x16_c |= 1 << (c >> ss_x);
          else
            mask_8x8_c |= 1 << (c >> ss_x);
        }
        if (!skip_this_r && ((r >> ss_y) & 1) == 0) {
          if (!skip_border_4x4_r)
            mask_16x16[r] |= 1 << (c >> ss_x);
          else
            mask_8x8[r] |= 1 << (c >> ss_x);
        }
      } else {
        // force 8x8 filtering on 32x32 boundaries
        if (!skip_this_c) {
          if (tx_size == TX_8X8 || ((c >> ss_x) & 3) == 0)
            mask_8x8_c |= 1 << (c >> ss_x);
          else
            mask_4x4_c |= 1 << (c >> ss_x);
        }

        if (!skip_this_r) {
          if (tx_size == TX_8X8 || ((r >> ss_y) & 3) == 0)
            mask_8x8[r] |= 1 << (c >> ss_x);
          else
            mask_4x4[r] |= 1 << (c >> ss_x);
        }

        if (!skip_this && tx_size < TX_8X8 && !skip_border_4x4_c)
          mask_4x4_int[r] |= 1 << (c >> ss_x);
      }
    }

    // Disable filtering on the leftmost column
    border_mask = ~(mi_col == 0);
#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_vert(CONVERT_TO_SHORTPTR(dst->buf),
                                     dst->stride,
                                     mask_16x16_c & border_mask,
                                     mask_8x8_c & border_mask,
                                     mask_4x4_c & border_mask,
                                     mask_4x4_int[r],
                                     &cm->lf_info, &lfl[r << 3],
                                     (int)cm->bit_depth);
    } else {
      filter_selectively_vert(dst->buf, dst->stride,
                              mask_16x16_c & border_mask,
                              mask_8x8_c & border_mask,
                              mask_4x4_c & border_mask,
                              mask_4x4_int[r],
                              &cm->lf_info, &lfl[r << 3]);
    }
#else
    filter_selectively_vert(dst->buf, dst->stride,
                            mask_16x16_c & border_mask,
                            mask_8x8_c & border_mask,
                            mask_4x4_c & border_mask,
                            mask_4x4_int[r],
                            &cm->lf_info, &lfl[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    dst->buf += 8 * dst->stride;
    mi_8x8 += row_step_stride;
  }

  // Now do horizontal pass
  dst->buf = dst0;
  for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += row_step) {
    const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
    const unsigned int mask_4x4_int_r = skip_border_4x4_r ? 0 : mask_4x4_int[r];

    unsigned int mask_16x16_r;
    unsigned int mask_8x8_r;
    unsigned int mask_4x4_r;

    if (mi_row + r == 0) {
      mask_16x16_r = 0;
      mask_8x8_r = 0;
      mask_4x4_r = 0;
    } else {
      mask_16x16_r = mask_16x16[r];
      mask_8x8_r = mask_8x8[r];
      mask_4x4_r = mask_4x4[r];
    }
#if CONFIG_VP9_HIGHBITDEPTH
    if (cm->use_highbitdepth) {
      highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                      dst->stride,
                                      mask_16x16_r,
                                      mask_8x8_r,
                                      mask_4x4_r,
                                      mask_4x4_int_r,
                                      &cm->lf_info, &lfl[r << 3],
                                      (int)cm->bit_depth);
    } else {
      filter_selectively_horiz(dst->buf, dst->stride,
                               mask_16x16_r,
                               mask_8x8_r,
                               mask_4x4_r,
                               mask_4x4_int_r,
                               &cm->lf_info, &lfl[r << 3]);
    }
#else
    filter_selectively_horiz(dst->buf, dst->stride,
                             mask_16x16_r,
                             mask_8x8_r,
                             mask_4x4_r,
                             mask_4x4_int_r,
                             &cm->lf_info, &lfl[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    dst->buf += 8 * dst->stride;
  }
}

void vp9_filter_block_plane(VP9_COMMON *const cm,
                            struct macroblockd_plane *const plane,
                            int mi_row,
                            LOOP_FILTER_MASK *lfm) {
  struct buf_2d *const dst = &plane->dst;
  uint8_t *const dst0 = dst->buf;
  int r, c;

  if (!plane->plane_type) {
    uint64_t mask_16x16 = lfm->left_y[TX_16X16];
    uint64_t mask_8x8 = lfm->left_y[TX_8X8];
    uint64_t mask_4x4 = lfm->left_y[TX_4X4];
    uint64_t mask_4x4_int = lfm->int_4x4_y;

    // Vertical pass: do 2 rows at one time
    for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
      unsigned int mask_16x16_l = mask_16x16 & 0xffff;
      unsigned int mask_8x8_l = mask_8x8 & 0xffff;
      unsigned int mask_4x4_l = mask_4x4 & 0xffff;
      unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;

      // Disable filtering on the leftmost column.
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        highbd_filter_selectively_vert_row2(plane->plane_type,
                                            CONVERT_TO_SHORTPTR(dst->buf),
                                            dst->stride,
                                            mask_16x16_l,
                                            mask_8x8_l,
                                            mask_4x4_l,
                                            mask_4x4_int_l,
                                            &cm->lf_info, &lfm->lfl_y[r << 3],
                                            (int)cm->bit_depth);
      } else {
        filter_selectively_vert_row2(plane->plane_type,
                                     dst->buf, dst->stride,
                                     mask_16x16_l,
                                     mask_8x8_l,
                                     mask_4x4_l,
                                     mask_4x4_int_l,
                                     &cm->lf_info,
                                     &lfm->lfl_y[r << 3]);
      }
#else
      filter_selectively_vert_row2(plane->plane_type,
                                   dst->buf, dst->stride,
                                   mask_16x16_l,
                                   mask_8x8_l,
                                   mask_4x4_l,
                                   mask_4x4_int_l,
                                   &cm->lf_info, &lfm->lfl_y[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH
      dst->buf += 16 * dst->stride;
      mask_16x16 >>= 16;
      mask_8x8 >>= 16;
      mask_4x4 >>= 16;
      mask_4x4_int >>= 16;
    }

    // Horizontal pass
    dst->buf = dst0;
    mask_16x16 = lfm->above_y[TX_16X16];
    mask_8x8 = lfm->above_y[TX_8X8];
    mask_4x4 = lfm->above_y[TX_4X4];
    mask_4x4_int = lfm->int_4x4_y;

    for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r++) {
      unsigned int mask_16x16_r;
      unsigned int mask_8x8_r;
      unsigned int mask_4x4_r;

      if (mi_row + r == 0) {
        mask_16x16_r = 0;
        mask_8x8_r = 0;
        mask_4x4_r = 0;
      } else {
        mask_16x16_r = mask_16x16 & 0xff;
        mask_8x8_r = mask_8x8 & 0xff;
        mask_4x4_r = mask_4x4 & 0xff;
      }

#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                        dst->stride,
                                        mask_16x16_r,
                                        mask_8x8_r,
                                        mask_4x4_r,
                                        mask_4x4_int & 0xff,
                                        &cm->lf_info,
                                        &lfm->lfl_y[r << 3],
                                        (int)cm->bit_depth);
      } else {
        filter_selectively_horiz(dst->buf, dst->stride,
                                 mask_16x16_r,
                                 mask_8x8_r,
                                 mask_4x4_r,
                                 mask_4x4_int & 0xff,
                                 &cm->lf_info,
                                 &lfm->lfl_y[r << 3]);
      }
#else
      filter_selectively_horiz(dst->buf, dst->stride,
                               mask_16x16_r,
                               mask_8x8_r,
                               mask_4x4_r,
                               mask_4x4_int & 0xff,
                               &cm->lf_info,
                               &lfm->lfl_y[r << 3]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      dst->buf += 8 * dst->stride;
      mask_16x16 >>= 8;
      mask_8x8 >>= 8;
      mask_4x4 >>= 8;
      mask_4x4_int >>= 8;
    }
  } else {
    uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
    uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
    uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
    uint16_t mask_4x4_int = lfm->int_4x4_uv;

    // Vertical pass: do 2 rows at one time
    for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 4) {
      if (plane->plane_type == 1) {
        for (c = 0; c < (MI_BLOCK_SIZE >> 1); c++) {
          lfm->lfl_uv[(r << 1) + c] = lfm->lfl_y[(r << 3) + (c << 1)];
          lfm->lfl_uv[((r + 2) << 1) + c] = lfm->lfl_y[((r + 2) << 3) +
                                                       (c << 1)];
        }
      }

      {
        unsigned int mask_16x16_l = mask_16x16 & 0xff;
        unsigned int mask_8x8_l = mask_8x8 & 0xff;
        unsigned int mask_4x4_l = mask_4x4 & 0xff;
        unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;

        // Disable filtering on the leftmost column.
#if CONFIG_VP9_HIGHBITDEPTH
        if (cm->use_highbitdepth) {
          highbd_filter_selectively_vert_row2(plane->plane_type,
                                              CONVERT_TO_SHORTPTR(dst->buf),
                                              dst->stride,
                                              mask_16x16_l,
                                              mask_8x8_l,
                                              mask_4x4_l,
                                              mask_4x4_int_l,
                                              &cm->lf_info,
                                              &lfm->lfl_uv[r << 1],
                                              (int)cm->bit_depth);
        } else {
          filter_selectively_vert_row2(plane->plane_type,
                                       dst->buf, dst->stride,
                                       mask_16x16_l,
                                       mask_8x8_l,
                                       mask_4x4_l,
                                       mask_4x4_int_l,
                                       &cm->lf_info,
                                       &lfm->lfl_uv[r << 1]);
        }
#else
        filter_selectively_vert_row2(plane->plane_type,
                                     dst->buf, dst->stride,
                                     mask_16x16_l,
                                     mask_8x8_l,
                                     mask_4x4_l,
                                     mask_4x4_int_l,
                                     &cm->lf_info,
                                     &lfm->lfl_uv[r << 1]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

        dst->buf += 16 * dst->stride;
        mask_16x16 >>= 8;
        mask_8x8 >>= 8;
        mask_4x4 >>= 8;
        mask_4x4_int >>= 8;
      }
    }

    // Horizontal pass
    dst->buf = dst0;
    mask_16x16 = lfm->above_uv[TX_16X16];
    mask_8x8 = lfm->above_uv[TX_8X8];
    mask_4x4 = lfm->above_uv[TX_4X4];
    mask_4x4_int = lfm->int_4x4_uv;

    for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
      const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1;
      const unsigned int mask_4x4_int_r = skip_border_4x4_r ?
          0 : (mask_4x4_int & 0xf);
      unsigned int mask_16x16_r;
      unsigned int mask_8x8_r;
      unsigned int mask_4x4_r;

      if (mi_row + r == 0) {
        mask_16x16_r = 0;
        mask_8x8_r = 0;
        mask_4x4_r = 0;
      } else {
        mask_16x16_r = mask_16x16 & 0xf;
        mask_8x8_r = mask_8x8 & 0xf;
        mask_4x4_r = mask_4x4 & 0xf;
      }

#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                        dst->stride,
                                        mask_16x16_r,
                                        mask_8x8_r,
                                        mask_4x4_r,
                                        mask_4x4_int_r,
                                        &cm->lf_info,
                                        &lfm->lfl_uv[r << 1],
                                        (int)cm->bit_depth);
      } else {
        filter_selectively_horiz(dst->buf, dst->stride,
                                 mask_16x16_r,
                                 mask_8x8_r,
                                 mask_4x4_r,
                                 mask_4x4_int_r,
                                 &cm->lf_info,
                                 &lfm->lfl_uv[r << 1]);
      }
#else
      filter_selectively_horiz(dst->buf, dst->stride,
                               mask_16x16_r,
                               mask_8x8_r,
                               mask_4x4_r,
                               mask_4x4_int_r,
                               &cm->lf_info,
                               &lfm->lfl_uv[r << 1]);
#endif  // CONFIG_VP9_HIGHBITDEPTH

      dst->buf += 8 * dst->stride;
      mask_16x16 >>= 4;
      mask_8x8 >>= 4;
      mask_4x4 >>= 4;
      mask_4x4_int >>= 4;
    }
  }
}

void vp9_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
                          VP9_COMMON *cm,
                          struct macroblockd_plane planes[MAX_MB_PLANE],
                          int start, int stop, int y_only) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  const int use_420 = y_only || (planes[1].subsampling_y == 1 &&
                                 planes[1].subsampling_x == 1);
  LOOP_FILTER_MASK lfm;
  int mi_row, mi_col;

  for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
    MODE_INFO *mi = cm->mi + mi_row * cm->mi_stride;

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
      int plane;

      vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      // TODO(JBB): Make setup_mask work for non 420.
      if (use_420)
        vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
                       &lfm);

      for (plane = 0; plane < num_planes; ++plane) {
        if (use_420)
          vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
        else
          filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                    mi_row, mi_col);
      }
    }
  }
}
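
// Editor's note (not in the original source): start and stop are in units of
// mi rows (8 pixels each); the loop above advances MI_BLOCK_SIZE == 8 mi at
// a time, i.e. one 64x64 superblock row per iteration.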

void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame,
                           VP9_COMMON *cm, MACROBLOCKD *xd,
                           int frame_filter_level,
                           int y_only, int partial_frame) {
  int start_mi_row, end_mi_row, mi_rows_to_filter;
  if (!frame_filter_level) return;
  start_mi_row = 0;
  mi_rows_to_filter = cm->mi_rows;
  if (partial_frame && cm->mi_rows > 8) {
    start_mi_row = cm->mi_rows >> 1;
    start_mi_row &= 0xfffffff8;
    mi_rows_to_filter = MAX(cm->mi_rows / 8, 8);
  }
  end_mi_row = start_mi_row + mi_rows_to_filter;
  vp9_loop_filter_frame_init(cm, frame_filter_level);
  vp9_loop_filter_rows(frame, cm, xd->plane,
                       start_mi_row, end_mi_row,
                       y_only);
}

int vp9_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
  (void)unused;
  vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                       lf_data->start, lf_data->stop, lf_data->y_only);
  return 1;
}