From 5e16d397bd67b6d870c1ac34f50799f5c2fd6f91 Mon Sep 17 00:00:00 2001 From: James Zern Date: Mon, 17 Aug 2015 18:19:22 -0700 Subject: [PATCH] vpx_dsp_common: add VPX prefix to MIN/MAX prevents redeclaration warnings; vp8 has its own define which will be resolved in a future commit Change-Id: Ic941fef3dd4262fcdce48b73075fe6b375f11c9c --- vp10/common/blockd.h | 2 +- vp10/common/common_data.h | 2 +- vp10/common/loopfilter.c | 2 +- vp10/common/postproc.c | 2 +- vp10/common/pred_common.h | 8 +-- vp10/common/thread_common.c | 4 +- vp10/common/tile_common.c | 2 +- vp10/decoder/decodeframe.c | 18 +++--- vp10/decoder/decodemv.c | 14 ++--- vp10/encoder/aq_complexity.c | 6 +- vp10/encoder/aq_cyclicrefresh.c | 17 +++--- vp10/encoder/bitstream.c | 2 +- vp10/encoder/denoiser.c | 8 +-- vp10/encoder/encodeframe.c | 58 ++++++++++---------- vp10/encoder/encodemv.c | 4 +- vp10/encoder/encoder.c | 37 +++++++------ vp10/encoder/ethread.c | 4 +- vp10/encoder/extend.c | 10 ++-- vp10/encoder/firstpass.c | 82 ++++++++++++++-------------- vp10/encoder/mbgraph.c | 2 +- vp10/encoder/mcomp.c | 72 ++++++++++++------------ vp10/encoder/picklpf.c | 4 +- vp10/encoder/ratectrl.c | 106 +++++++++++++++++++----------------- vp10/encoder/rd.c | 15 +++-- vp10/encoder/rdopt.c | 93 +++++++++++++++---------------- vp10/encoder/speed_features.c | 16 +++--- vp10/encoder/svc_layercontext.c | 10 ++-- vp10/encoder/temporal_filter.c | 2 +- vp10/vp10_dx_iface.c | 2 +- vp9/common/vp9_blockd.h | 2 +- vp9/common/vp9_common_data.c | 2 +- vp9/common/vp9_loopfilter.c | 2 +- vp9/common/vp9_postproc.c | 2 +- vp9/common/vp9_pred_common.h | 8 +-- vp9/common/vp9_thread_common.c | 4 +- vp9/common/vp9_tile_common.c | 2 +- vp9/decoder/vp9_decodeframe.c | 18 +++--- vp9/decoder/vp9_decodemv.c | 14 ++--- vp9/encoder/vp9_aq_complexity.c | 6 +- vp9/encoder/vp9_aq_cyclicrefresh.c | 17 +++--- vp9/encoder/vp9_bitstream.c | 2 +- vp9/encoder/vp9_denoiser.c | 8 +-- vp9/encoder/vp9_encodeframe.c | 66 +++++++++++----------- vp9/encoder/vp9_encodemv.c | 4 +- vp9/encoder/vp9_encoder.c | 37 +++++++------ vp9/encoder/vp9_ethread.c | 4 +- vp9/encoder/vp9_extend.c | 10 ++-- vp9/encoder/vp9_firstpass.c | 82 ++++++++++++++-------------- vp9/encoder/vp9_mbgraph.c | 2 +- vp9/encoder/vp9_mcomp.c | 68 +++++++++++------------ vp9/encoder/vp9_picklpf.c | 4 +- vp9/encoder/vp9_pickmode.c | 38 ++++++------- vp9/encoder/vp9_ratectrl.c | 109 +++++++++++++++++++------------------ vp9/encoder/vp9_rd.c | 15 +++-- vp9/encoder/vp9_rdopt.c | 95 ++++++++++++++++---------------- vp9/encoder/vp9_speed_features.c | 16 +++--- vp9/encoder/vp9_svc_layercontext.c | 10 ++-- vp9/encoder/vp9_temporal_filter.c | 2 +- vp9/vp9_dx_iface.c | 3 +- vpx_dsp/bitreader.c | 2 +- vpx_dsp/prob.h | 4 +- vpx_dsp/vpx_dsp_common.h | 4 +- 62 files changed, 644 insertions(+), 622 deletions(-) diff --git a/vp10/common/blockd.h b/vp10/common/blockd.h index 26703d1..30bf103 100644 --- a/vp10/common/blockd.h +++ b/vp10/common/blockd.h @@ -238,7 +238,7 @@ static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize, return TX_4X4; } else { const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss]; - return MIN(y_tx_size, max_txsize_lookup[plane_bsize]); + return VPXMIN(y_tx_size, max_txsize_lookup[plane_bsize]); } } diff --git a/vp10/common/common_data.h b/vp10/common/common_data.h index 8189518..f2ce039 100644 --- a/vp10/common/common_data.h +++ b/vp10/common/common_data.h @@ -35,7 +35,7 @@ static const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = static const uint8_t 
num_8x8_blocks_high_lookup[BLOCK_SIZES] = {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8}; -// MIN(3, MIN(b_width_log2(bsize), b_height_log2(bsize))) +// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize))) static const uint8_t size_group_lookup[BLOCK_SIZES] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3}; diff --git a/vp10/common/loopfilter.c b/vp10/common/loopfilter.c index c11d46a..f108374 100644 --- a/vp10/common/loopfilter.c +++ b/vp10/common/loopfilter.c @@ -1588,7 +1588,7 @@ void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, if (partial_frame && cm->mi_rows > 8) { start_mi_row = cm->mi_rows >> 1; start_mi_row &= 0xfffffff8; - mi_rows_to_filter = MAX(cm->mi_rows / 8, 8); + mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); } end_mi_row = start_mi_row + mi_rows_to_filter; vp10_loop_filter_frame_init(cm, frame_filter_level); diff --git a/vp10/common/postproc.c b/vp10/common/postproc.c index 2d5b36a..d273420 100644 --- a/vp10/common/postproc.c +++ b/vp10/common/postproc.c @@ -625,7 +625,7 @@ static void swap_mi_and_prev_mi(VP10_COMMON *cm) { int vp10_post_proc_frame(struct VP10Common *cm, YV12_BUFFER_CONFIG *dest, vp10_ppflags_t *ppflags) { - const int q = MIN(105, cm->lf.filter_level * 2); + const int q = VPXMIN(105, cm->lf.filter_level * 2); const int flags = ppflags->post_proc_flag; YV12_BUFFER_CONFIG *const ppbuf = &cm->post_proc_buffer; struct postproc_state *const ppstate = &cm->postproc_state; diff --git a/vp10/common/pred_common.h b/vp10/common/pred_common.h index 9345c6b..22d2774 100644 --- a/vp10/common/pred_common.h +++ b/vp10/common/pred_common.h @@ -24,14 +24,14 @@ static INLINE int get_segment_id(const VP10_COMMON *cm, const int mi_offset = mi_row * cm->mi_cols + mi_col; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = MIN(cm->mi_cols - mi_col, bw); - const int ymis = MIN(cm->mi_rows - mi_row, bh); + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); int x, y, segment_id = MAX_SEGMENTS; for (y = 0; y < ymis; ++y) for (x = 0; x < xmis; ++x) - segment_id = MIN(segment_id, - segment_ids[mi_offset + y * cm->mi_cols + x]); + segment_id = + VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); return segment_id; diff --git a/vp10/common/thread_common.c b/vp10/common/thread_common.c index 69c6471..e87caab 100644 --- a/vp10/common/thread_common.c +++ b/vp10/common/thread_common.c @@ -165,7 +165,7 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, // Decoder may allocate more threads than number of tiles based on user's // input. 
const int tile_cols = 1 << cm->log2_tile_cols; - const int num_workers = MIN(nworkers, tile_cols); + const int num_workers = VPXMIN(nworkers, tile_cols); int i; if (!lf_sync->sync_range || sb_rows != lf_sync->rows || @@ -229,7 +229,7 @@ void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, if (partial_frame && cm->mi_rows > 8) { start_mi_row = cm->mi_rows >> 1; start_mi_row &= 0xfffffff8; - mi_rows_to_filter = MAX(cm->mi_rows / 8, 8); + mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); } end_mi_row = start_mi_row + mi_rows_to_filter; vp10_loop_filter_frame_init(cm, frame_filter_level); diff --git a/vp10/common/tile_common.c b/vp10/common/tile_common.c index 821a5e8..f830e60 100644 --- a/vp10/common/tile_common.c +++ b/vp10/common/tile_common.c @@ -18,7 +18,7 @@ static int get_tile_offset(int idx, int mis, int log2) { const int sb_cols = mi_cols_aligned_to_sb(mis) >> MI_BLOCK_SIZE_LOG2; const int offset = ((idx * sb_cols) >> log2) << MI_BLOCK_SIZE_LOG2; - return MIN(offset, mis); + return VPXMIN(offset, mis); } void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) { diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c index 6cb2ff5..b17fa1e 100644 --- a/vp10/decoder/decodeframe.c +++ b/vp10/decoder/decodeframe.c @@ -624,7 +624,7 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd, // pixels of each superblock row can be changed by next superblock row. if (pbi->frame_parallel_decode) vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); // Skip border extension if block is inside the frame. if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 || @@ -652,7 +652,7 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd, if (pbi->frame_parallel_decode) { const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS; vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + VPXMAX(0, (y1 + 7)) << (plane == 0 ? 
0 : 1)); } } #if CONFIG_VP9_HIGHBITDEPTH @@ -723,8 +723,8 @@ static void dec_build_inter_predictors_sb(VP10Decoder *const pbi, static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl, int n4_hl) { // get minimum log2 num4x4s dimension - const int x = MIN(n4_wl, n4_hl); - return MIN(mbmi->tx_size, x); + const int x = VPXMIN(n4_wl, n4_hl); + return VPXMIN(mbmi->tx_size, x); } static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) { @@ -785,8 +785,8 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd, const int less8x8 = bsize < BLOCK_8X8; const int bw = 1 << (bwl - 1); const int bh = 1 << (bhl - 1); - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - const int y_mis = MIN(bh, cm->mi_rows - mi_row); + const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); + const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis, bwl, bhl); @@ -1570,7 +1570,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); const int tile_cols = 1 << cm->log2_tile_cols; const int tile_rows = 1 << cm->log2_tile_rows; - const int num_workers = MIN(pbi->max_threads & ~1, tile_cols); + const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols); TileBuffer tile_buffers[1][1 << 6]; int n; int final_worker = -1; @@ -1637,7 +1637,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, int group_start = 0; while (group_start < tile_cols) { const TileBuffer largest = tile_buffers[0][group_start]; - const int group_end = MIN(group_start + num_workers, tile_cols) - 1; + const int group_end = VPXMIN(group_start + num_workers, tile_cols) - 1; memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1, (group_end - group_start) * sizeof(tile_buffers[0][0])); tile_buffers[0][group_end] = largest; @@ -2069,7 +2069,7 @@ static struct vpx_read_bit_buffer *init_read_bit_buffer( rb->error_handler = error_handler; rb->error_handler_data = &pbi->common; if (pbi->decrypt_cb) { - const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data); + const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data); pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n); rb->bit_buffer = clear_data; rb->bit_buffer_end = clear_data + n; diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c index 5fbe2ca..cf331ee 100644 --- a/vp10/decoder/decodemv.c +++ b/vp10/decoder/decodemv.c @@ -87,7 +87,7 @@ static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8) return read_selected_tx_size(cm, xd, max_tx_size, r); else - return MIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]); + return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]); } static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids, @@ -96,8 +96,8 @@ static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids, for (y = 0; y < y_mis; y++) for (x = 0; x < x_mis; x++) - segment_id = MIN(segment_id, - segment_ids[mi_offset + y * cm->mi_cols + x]); + segment_id = + VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); return segment_id; @@ -156,8 +156,8 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd, const int bh = xd->plane[0].n4_h >> 1; // TODO(slavarnway): move x_mis, y_mis into xd ????? 
- const int x_mis = MIN(cm->mi_cols - mi_col, bw); - const int y_mis = MIN(cm->mi_rows - mi_row, bh); + const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); + const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); if (!seg->enabled) return 0; // Default for disabled segmentation @@ -212,8 +212,8 @@ static void read_intra_frame_mode_info(VP10_COMMON *const cm, const int bh = xd->plane[0].n4_h >> 1; // TODO(slavarnway): move x_mis, y_mis into xd ????? - const int x_mis = MIN(cm->mi_cols - mi_col, bw); - const int y_mis = MIN(cm->mi_rows - mi_row, bh); + const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); + const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); mbmi->segment_id = read_intra_segment_id(cm, mi_offset, x_mis, y_mis, r); mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); diff --git a/vp10/encoder/aq_complexity.c b/vp10/encoder/aq_complexity.c index 195732a..7ba879d 100644 --- a/vp10/encoder/aq_complexity.c +++ b/vp10/encoder/aq_complexity.c @@ -117,8 +117,8 @@ void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, const int mi_offset = mi_row * cm->mi_cols + mi_col; const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; - const int xmis = MIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]); - const int ymis = MIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]); + const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]); + const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]); int x, y; int i; unsigned char segment; @@ -136,7 +136,7 @@ void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, vpx_clear_system_state(); low_var_thresh = (cpi->oxcf.pass == 2) - ? MAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) + ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) : DEFAULT_LV_THRESH; vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col); diff --git a/vp10/encoder/aq_cyclicrefresh.c b/vp10/encoder/aq_cyclicrefresh.c index 3d6e38b..45181d2 100644 --- a/vp10/encoder/aq_cyclicrefresh.c +++ b/vp10/encoder/aq_cyclicrefresh.c @@ -220,8 +220,8 @@ void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi, CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = MIN(cm->mi_cols - mi_col, bw); - const int ymis = MIN(cm->mi_rows - mi_row, bh); + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); const int block_index = mi_row * cm->mi_cols + mi_col; const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist, bsize); @@ -413,10 +413,10 @@ static void cyclic_refresh_update_map(VP10_COMP *const cpi) { assert(mi_col >= 0 && mi_col < cm->mi_cols); bl_index = mi_row * cm->mi_cols + mi_col; // Loop through all 8x8 blocks in superblock and update map. - xmis = MIN(cm->mi_cols - mi_col, - num_8x8_blocks_wide_lookup[BLOCK_64X64]); - ymis = MIN(cm->mi_rows - mi_row, - num_8x8_blocks_high_lookup[BLOCK_64X64]); + xmis = + VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[BLOCK_64X64]); + ymis = + VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[BLOCK_64X64]); for (y = 0; y < ymis; y++) { for (x = 0; x < xmis; x++) { const int bl_index2 = bl_index + y * cm->mi_cols + x; @@ -545,8 +545,9 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) { // Set a more aggressive (higher) q delta for segment BOOST2. 
qindex_delta = compute_deltaq( - cpi, cm->base_qindex, MIN(CR_MAX_RATE_TARGET_RATIO, - 0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta)); + cpi, cm->base_qindex, + VPXMIN(CR_MAX_RATE_TARGET_RATIO, + 0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta)); cr->qindex_delta[2] = qindex_delta; vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta); diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c index b872b4e..11ee0f5 100644 --- a/vp10/encoder/bitstream.c +++ b/vp10/encoder/bitstream.c @@ -815,7 +815,7 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd, static void encode_txfm_probs(VP10_COMMON *cm, vpx_writer *w, FRAME_COUNTS *counts) { // Mode - vpx_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); + vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2); if (cm->tx_mode >= ALLOW_32X32) vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT); diff --git a/vp10/encoder/denoiser.c b/vp10/encoder/denoiser.c index 3c9d949..271113a 100644 --- a/vp10/encoder/denoiser.c +++ b/vp10/encoder/denoiser.c @@ -124,10 +124,10 @@ int vp10_denoiser_filter_c(const uint8_t *sig, int sig_stride, adj = adj_val[2]; } if (diff > 0) { - avg[c] = MIN(UINT8_MAX, sig[c] + adj); + avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj); total_adj += adj; } else { - avg[c] = MAX(0, sig[c] - adj); + avg[c] = VPXMAX(0, sig[c] - adj); total_adj -= adj; } } @@ -164,13 +164,13 @@ int vp10_denoiser_filter_c(const uint8_t *sig, int sig_stride, // Diff positive means we made positive adjustment above // (in first try/attempt), so now make negative adjustment to bring // denoised signal down. - avg[c] = MAX(0, avg[c] - adj); + avg[c] = VPXMAX(0, avg[c] - adj); total_adj -= adj; } else { // Diff negative means we made negative adjustment above // (in first try/attempt), so now make positive adjustment to bring // denoised signal up. - avg[c] = MIN(UINT8_MAX, avg[c] + adj); + avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj); total_adj += adj; } } diff --git a/vp10/encoder/encodeframe.c b/vp10/encoder/encodeframe.c index 09651fd..1635046 100644 --- a/vp10/encoder/encodeframe.c +++ b/vp10/encoder/encodeframe.c @@ -978,8 +978,8 @@ static void update_state(VP10_COMP *cpi, ThreadData *td, const struct segmentation *const seg = &cm->seg; const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - const int y_mis = MIN(bh, cm->mi_rows - mi_row); + const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); + const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; int w, h; @@ -1131,8 +1131,8 @@ static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, mbmi->sb_type = bsize; mbmi->mode = ZEROMV; - mbmi->tx_size = MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[tx_mode]); + mbmi->tx_size = VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[tx_mode]); mbmi->skip = 1; mbmi->uv_mode = DC_PRED; mbmi->ref_frame[0] = LAST_FRAME; @@ -1495,7 +1495,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left, int cols_left, int *bh, int *bw) { if (rows_left <= 0 || cols_left <= 0) { - return MIN(bsize, BLOCK_8X8); + return VPXMIN(bsize, BLOCK_8X8); } else { for (; bsize > 0; bsize -= 3) { *bh = num_8x8_blocks_high_lookup[bsize]; @@ -1869,8 +1869,8 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8, MODE_INFO *mi = mi_8x8[index+j]; BLOCK_SIZE sb_type = mi ? 
mi->mbmi.sb_type : 0; bs_hist[sb_type]++; - *min_block_size = MIN(*min_block_size, sb_type); - *max_block_size = MAX(*max_block_size, sb_type); + *min_block_size = VPXMIN(*min_block_size, sb_type); + *max_block_size = VPXMAX(*max_block_size, sb_type); } index += xd->mi_stride; } @@ -1947,8 +1947,8 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile, if (vp10_active_edge_sb(cpi, mi_row, mi_col)) { min_size = BLOCK_4X4; } else { - min_size = MIN(cpi->sf.rd_auto_partition_min_limit, - MIN(min_size, max_size)); + min_size = + VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size)); } // When use_square_partition_only is true, make sure at least one square @@ -1984,8 +1984,8 @@ static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, for (idx = 0; idx < mi_width; ++idx) { mi = prev_mi[idy * cm->mi_stride + idx]; bs = mi ? mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } } @@ -1994,8 +1994,8 @@ static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, for (idy = 0; idy < mi_height; ++idy) { mi = xd->mi[idy * cm->mi_stride - 1]; bs = mi ? mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } @@ -2003,8 +2003,8 @@ static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, for (idx = 0; idx < mi_width; ++idx) { mi = xd->mi[idx - cm->mi_stride]; bs = mi ? mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } @@ -2169,9 +2169,9 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td, int mb_row = mi_row >> 1; int mb_col = mi_col >> 1; int mb_row_end = - MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); + VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); int mb_col_end = - MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); + VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); int r, c; // compute a complexity measure, basically measure inconsistency of motion @@ -2260,9 +2260,9 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td, int mb_row = mi_row >> 1; int mb_col = mi_col >> 1; int mb_row_end = - MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); + VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); int mb_col_end = - MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); + VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); int r, c; int skip = 1; @@ -2887,7 +2887,7 @@ static void encode_frame_internal(VP10_COMP *cpi) { #endif // If allowed, encoding tiles in parallel with one thread handling one tile. 
- if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1) + if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1) vp10_encode_tiles_mt(cpi); else encode_tiles(cpi); @@ -3102,10 +3102,10 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td, int plane; mbmi->skip = 1; for (plane = 0; plane < MAX_MB_PLANE; ++plane) - vp10_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); + vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane); if (output_enabled) sum_intra_stats(td->counts, mi); - vp10_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8)); + vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); } else { int ref; const int is_compound = has_second_ref(mbmi); @@ -3118,12 +3118,14 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td, &xd->block_refs[ref]->sf); } if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip) - vp10_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); + vp10_build_inter_predictors_sby(xd, mi_row, mi_col, + VPXMAX(bsize, BLOCK_8X8)); - vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); + vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col, + VPXMAX(bsize, BLOCK_8X8)); - vp10_encode_sb(x, MAX(bsize, BLOCK_8X8)); - vp10_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8)); + vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8)); + vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); } if (output_enabled) { @@ -3137,8 +3139,8 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TX_SIZE tx_size; // The new intra coding scheme requires no change of transform size if (is_inter_block(&mi->mbmi)) { - tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode], - max_txsize_lookup[bsize]); + tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode], + max_txsize_lookup[bsize]); } else { tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; } diff --git a/vp10/encoder/encodemv.c b/vp10/encoder/encodemv.c index b67b93e..ecd5a38 100644 --- a/vp10/encoder/encodemv.c +++ b/vp10/encoder/encodemv.c @@ -216,8 +216,8 @@ void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w, // If auto_mv_step_size is enabled then keep track of the largest // motion vector component used. if (cpi->sf.mv.auto_mv_step_size) { - unsigned int maxv = MAX(abs(mv->row), abs(mv->col)) >> 3; - cpi->max_mv_magnitude = MAX(maxv, cpi->max_mv_magnitude); + unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3; + cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude); } } diff --git a/vp10/encoder/encoder.c b/vp10/encoder/encoder.c index ee71ecf..33f060f 100644 --- a/vp10/encoder/encoder.c +++ b/vp10/encoder/encoder.c @@ -1492,8 +1492,8 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) { // Under a configuration change, where maximum_buffer_size may change, // keep buffer level clipped to the maximum allowed buffer size. - rc->bits_off_target = MIN(rc->bits_off_target, rc->maximum_buffer_size); - rc->buffer_level = MIN(rc->buffer_level, rc->maximum_buffer_size); + rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); + rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size); // Set up frame rate and related parameters rate control values. 
vp10_new_framerate(cpi, cpi->framerate); @@ -2615,7 +2615,7 @@ static int scale_down(VP10_COMP *cpi, int q) { if (rc->frame_size_selector == UNSCALED && q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) { const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1] - * MAX(rc->this_frame_target, rc->avg_frame_bandwidth)); + * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth)); scale = rc->projected_frame_size > max_size_thresh ? 1 : 0; } return scale; @@ -2998,7 +2998,7 @@ static void output_frame_level_debug_stats(VP10_COMP *cpi) { static void set_mv_search_params(VP10_COMP *cpi) { const VP10_COMMON *const cm = &cpi->common; - const unsigned int max_mv_def = MIN(cm->width, cm->height); + const unsigned int max_mv_def = VPXMIN(cm->width, cm->height); // Default based on max resolution. cpi->mv_step_param = vp10_init_search_range(max_mv_def); @@ -3013,8 +3013,8 @@ static void set_mv_search_params(VP10_COMP *cpi) { // Allow mv_steps to correspond to twice the max mv magnitude found // in the previous frame, capped by the default max_mv_magnitude based // on resolution. - cpi->mv_step_param = - vp10_init_search_range(MIN(max_mv_def, 2 * cpi->max_mv_magnitude)); + cpi->mv_step_param = vp10_init_search_range( + VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude)); } cpi->max_mv_magnitude = 0; } @@ -3386,7 +3386,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi, // Adjust Q q = (int)((q * high_err_target) / kf_err); - q = MIN(q, (q_high + q_low) >> 1); + q = VPXMIN(q, (q_high + q_low) >> 1); } else if (kf_err < low_err_target && rc->projected_frame_size >= frame_under_shoot_limit) { // The key frame is much better than the previous frame @@ -3395,7 +3395,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi, // Adjust Q q = (int)((q * low_err_target) / kf_err); - q = MIN(q, (q_high + q_low + 1) >> 1); + q = VPXMIN(q, (q_high + q_low + 1) >> 1); } // Clamp Q to upper and lower limits: @@ -3404,7 +3404,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi, loop = q != last_q; } else if (recode_loop_test( cpi, frame_over_shoot_limit, frame_under_shoot_limit, - q, MAX(q_high, top_index), bottom_index)) { + q, VPXMAX(q_high, top_index), bottom_index)) { // Is the projected frame size out of range and are we allowed // to attempt to recode. int last_q = q; @@ -3446,12 +3446,12 @@ static void encode_with_recode_loop(VP10_COMP *cpi, vp10_rc_update_rate_correction_factors(cpi); q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, MAX(q_high, top_index)); + bottom_index, VPXMAX(q_high, top_index)); while (q < q_low && retries < 10) { vp10_rc_update_rate_correction_factors(cpi); q = vp10_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, MAX(q_high, top_index)); + bottom_index, VPXMAX(q_high, top_index)); retries++; } } @@ -4030,8 +4030,8 @@ static void adjust_frame_rate(VP10_COMP *cpi, // Average this frame's rate into the last second's average // frame rate. If we haven't seen 1 second yet, then average // over the whole interval seen. 
- const double interval = MIN((double)(source->ts_end - - cpi->first_time_stamp_ever), 10000000.0); + const double interval = VPXMIN( + (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0); double avg_duration = 10000000.0 / cpi->framerate; avg_duration *= (interval - avg_duration + this_duration); avg_duration /= interval; @@ -4095,7 +4095,7 @@ static void adjust_image_stat(double y, double u, double v, double all, s->stat[U] += u; s->stat[V] += v; s->stat[ALL] += all; - s->worst = MIN(s->worst, all); + s->worst = VPXMIN(s->worst, all); } #endif // CONFIG_INTERNAL_STATS @@ -4425,7 +4425,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags, frame_ssim2 = vpx_calc_ssim(orig, recon, &weight); #endif // CONFIG_VP9_HIGHBITDEPTH - cpi->worst_ssim= MIN(cpi->worst_ssim, frame_ssim2); + cpi->worst_ssim= VPXMIN(cpi->worst_ssim, frame_ssim2); cpi->summed_quality += frame_ssim2 * weight; cpi->summed_weights += weight; @@ -4462,7 +4462,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags, cpi->Source->y_buffer, cpi->Source->y_stride, cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride, cpi->Source->y_width, cpi->Source->y_height); - cpi->worst_blockiness = MAX(cpi->worst_blockiness, frame_blockiness); + cpi->worst_blockiness = + VPXMAX(cpi->worst_blockiness, frame_blockiness); cpi->total_blockiness += frame_blockiness; } } @@ -4482,8 +4483,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags, double consistency = vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency); if (consistency > 0.0) - cpi->worst_consistency = MIN(cpi->worst_consistency, - consistency); + cpi->worst_consistency = + VPXMIN(cpi->worst_consistency, consistency); cpi->total_inconsistency += this_inconsistency; } } diff --git a/vp10/encoder/ethread.c b/vp10/encoder/ethread.c index 4356dff..6ba0572 100644 --- a/vp10/encoder/ethread.c +++ b/vp10/encoder/ethread.c @@ -67,7 +67,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) { VP10_COMMON *const cm = &cpi->common; const int tile_cols = 1 << cm->log2_tile_cols; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - const int num_workers = MIN(cpi->oxcf.max_threads, tile_cols); + const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols); int i; vp10_init_tile_data(cpi); @@ -80,7 +80,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) { // resolution. if (cpi->use_svc) { int max_tile_cols = get_max_tile_cols(cpi); - allocated_workers = MIN(cpi->oxcf.max_threads, max_tile_cols); + allocated_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols); } CHECK_MEM_ERROR(cm, cpi->workers, diff --git a/vp10/encoder/extend.c b/vp10/encoder/extend.c index 1cac5ac..ffd992f 100644 --- a/vp10/encoder/extend.c +++ b/vp10/encoder/extend.c @@ -111,10 +111,12 @@ void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, // Motion estimation may use src block variance with the block size up // to 64x64, so the right and bottom need to be extended to 64 multiple // or up to 16, whichever is greater. 
- const int er_y = MAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) - - src->y_crop_width; - const int eb_y = MAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) - - src->y_crop_height; + const int er_y = + VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) - + src->y_crop_width; + const int eb_y = + VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) - + src->y_crop_height; const int uv_width_subsampling = (src->uv_width != src->y_width); const int uv_height_subsampling = (src->uv_height != src->y_height); const int et_uv = et_y >> uv_height_subsampling; diff --git a/vp10/encoder/firstpass.c b/vp10/encoder/firstpass.c index 328ef4b..a49468e 100644 --- a/vp10/encoder/firstpass.c +++ b/vp10/encoder/firstpass.c @@ -383,7 +383,7 @@ static unsigned int highbd_get_prediction_error(BLOCK_SIZE bsize, // for first pass test. static int get_search_range(const VP10_COMP *cpi) { int sr = 0; - const int dim = MIN(cpi->initial_width, cpi->initial_height); + const int dim = VPXMIN(cpi->initial_width, cpi->initial_height); while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr; @@ -1026,7 +1026,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) { // Exclude any image dead zone if (image_data_start_row > 0) { intra_skip_count = - MAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2)); + VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2)); } { @@ -1163,7 +1163,7 @@ static double calc_correction_factor(double err_per_mb, // Adjustment based on actual quantizer to power term. const double power_term = - MIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); + VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); // Calculate correction factor. if (power_term < 1.0) @@ -1192,7 +1192,7 @@ static int get_twopass_worst_quality(const VP10_COMP *cpi, } else { const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs : cpi->common.MBs; - const int active_mbs = MAX(1, num_mbs - (int)(num_mbs * inactive_zone)); + const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone)); const double av_err_per_mb = section_err / active_mbs; const double speed_term = 1.0 + 0.04 * oxcf->speed; const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR; @@ -1225,7 +1225,7 @@ static int get_twopass_worst_quality(const VP10_COMP *cpi, // Restriction on active max q for constrained quality mode. 
if (cpi->oxcf.rc_mode == VPX_CQ) - q = MAX(q, oxcf->cq_level); + q = VPXMAX(q, oxcf->cq_level); return q; } } @@ -1235,7 +1235,7 @@ static void setup_rf_level_maxq(VP10_COMP *cpi) { RATE_CONTROL *const rc = &cpi->rc; for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) { int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality); - rc->rf_level_maxq[i] = MAX(rc->worst_quality + qdelta, rc->best_quality); + rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality); } } @@ -1366,12 +1366,12 @@ static double get_sr_decay_rate(const VP10_COMP *cpi, if ((sr_diff > LOW_SR_DIFF_TRHESH)) { - sr_diff = MIN(sr_diff, SR_DIFF_MAX); + sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX); sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - (MOTION_AMP_PART * motion_amplitude_factor) - (INTRA_PART * modified_pcnt_intra); } - return MAX(sr_decay, MIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); + return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); } // This function gives an estimate of how badly we believe the prediction @@ -1381,7 +1381,7 @@ static double get_zero_motion_factor(const VP10_COMP *cpi, const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion; double sr_decay = get_sr_decay_rate(cpi, frame); - return MIN(sr_decay, zero_motion_pct); + return VPXMIN(sr_decay, zero_motion_pct); } #define ZM_POWER_FACTOR 0.75 @@ -1393,8 +1393,8 @@ static double get_prediction_decay_rate(const VP10_COMP *cpi, (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion), ZM_POWER_FACTOR)); - return MAX(zero_motion_factor, - (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); + return VPXMAX(zero_motion_factor, + (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); } // Function to test for a condition where a complex transition is followed @@ -1485,12 +1485,12 @@ static double calc_frame_boost(VP10_COMP *cpi, const double lq = vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth); - const double boost_q_correction = MIN((0.5 + (lq * 0.015)), 1.5); + const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5); int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs : cpi->common.MBs; // Correct for any inactive region in the image - num_mbs = (int)MAX(1, num_mbs * calculate_active_area(cpi, this_frame)); + num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame)); // Underlying boost factor is based on inter error ratio. frame_boost = (BASELINE_ERR_PER_MB * num_mbs) / @@ -1506,7 +1506,7 @@ static double calc_frame_boost(VP10_COMP *cpi, else frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); - return MIN(frame_boost, max_boost * boost_q_correction); + return VPXMIN(frame_boost, max_boost * boost_q_correction); } static int calc_arf_boost(VP10_COMP *cpi, int offset, @@ -1595,7 +1595,7 @@ static int calc_arf_boost(VP10_COMP *cpi, int offset, arf_boost = (*f_boost + *b_boost); if (arf_boost < ((b_frames + f_frames) * 20)) arf_boost = ((b_frames + f_frames) * 20); - arf_boost = MAX(arf_boost, MIN_ARF_GF_BOOST); + arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST); return arf_boost; } @@ -1666,7 +1666,8 @@ static int calculate_boost_bits(int frame_count, } // Calculate the number of extra bits for use in the boosted frame or frames. - return MAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks), 0); + return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks), + 0); } // Current limit on maximum number of active arfs in a GF/ARF group. 
@@ -1805,7 +1806,7 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits, gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx]; target_frame_size = clamp(target_frame_size, 0, - MIN(max_bits, (int)total_group_bits)); + VPXMIN(max_bits, (int)total_group_bits)); gf_group->update_type[frame_index] = LF_UPDATE; gf_group->rf_level[frame_index] = INTER_NORMAL; @@ -1926,7 +1927,7 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex, cpi->common.bit_depth)); - active_min_gf_interval = rc->min_gf_interval + MIN(2, int_max_q / 200); + active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200); if (active_min_gf_interval > rc->max_gf_interval) active_min_gf_interval = rc->max_gf_interval; @@ -1937,7 +1938,7 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { // bits to spare and are better with a smaller interval and smaller boost. // At high Q when there are few bits to spare we are better with a longer // interval to spread the cost of the GF. - active_max_gf_interval = 12 + MIN(4, (int_lbq / 6)); + active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6)); if (active_max_gf_interval < active_min_gf_interval) active_max_gf_interval = active_min_gf_interval; @@ -1982,8 +1983,8 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { decay_accumulator = decay_accumulator * loop_decay_rate; // Monitor for static sections. - zero_motion_accumulator = - MIN(zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); + zero_motion_accumulator = VPXMIN( + zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); // Break clause to detect very still sections after motion. For example, // a static image after a fade or other transition. @@ -2039,7 +2040,7 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) && (zero_motion_accumulator < 0.995)) ? 1 : 0; } else { - rc->gfu_boost = MAX((int)boost_score, MIN_ARF_GF_BOOST); + rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST); rc->source_alt_ref_pending = 0; } @@ -2094,11 +2095,11 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { // rc factor is a weight factor that corrects for local rate control drift. double rc_factor = 1.0; if (rc->rate_error_estimate > 0) { - rc_factor = MAX(RC_FACTOR_MIN, - (double)(100 - rc->rate_error_estimate) / 100.0); + rc_factor = VPXMAX(RC_FACTOR_MIN, + (double)(100 - rc->rate_error_estimate) / 100.0); } else { - rc_factor = MIN(RC_FACTOR_MAX, - (double)(100 - rc->rate_error_estimate) / 100.0); + rc_factor = VPXMIN(RC_FACTOR_MAX, + (double)(100 - rc->rate_error_estimate) / 100.0); } tmp_q = get_twopass_worst_quality(cpi, group_av_err, @@ -2106,7 +2107,7 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor); twopass->active_worst_quality = - MAX(tmp_q, twopass->active_worst_quality >> 1); + VPXMAX(tmp_q, twopass->active_worst_quality >> 1); } #endif @@ -2423,7 +2424,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { } else { twopass->kf_group_bits = 0; } - twopass->kf_group_bits = MAX(0, twopass->kf_group_bits); + twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits); // Reset the first pass file position. 
reset_fpf_position(twopass, start_position); @@ -2437,9 +2438,8 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { break; // Monitor for static sections. - zero_motion_accumulator = - MIN(zero_motion_accumulator, - get_zero_motion_factor(cpi, &next_frame)); + zero_motion_accumulator = VPXMIN( + zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); // Not all frames in the group are necessarily used in calculating boost. if ((i <= rc->max_gf_interval) || @@ -2452,7 +2452,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { const double loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); decay_accumulator *= loop_decay_rate; - decay_accumulator = MAX(decay_accumulator, MIN_DECAY_FACTOR); + decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR); av_decay_accumulator += decay_accumulator; ++loop_decay_counter; } @@ -2473,8 +2473,8 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Apply various clamps for min and max boost rc->kf_boost = (int)(av_decay_accumulator * boost_score); - rc->kf_boost = MAX(rc->kf_boost, (rc->frames_to_key * 3)); - rc->kf_boost = MAX(rc->kf_boost, MIN_KF_BOOST); + rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3)); + rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST); // Work out how many bits to allocate for the key frame itself. kf_bits = calculate_boost_bits((rc->frames_to_key - 1), @@ -2772,7 +2772,7 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) { // is designed to prevent extreme behaviour at the end of a clip // or group of frames. rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size; - twopass->bits_left = MAX(twopass->bits_left - bits_used, 0); + twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0); // Calculate the pct rc error. if (rc->total_actual_bits) { @@ -2788,7 +2788,7 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) { twopass->kf_group_bits -= bits_used; twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct; } - twopass->kf_group_bits = MAX(twopass->kf_group_bits, 0); + twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0); // Increment the gf group index ready for the next frame. ++twopass->gf_group.index; @@ -2838,18 +2838,18 @@ void vp10_twopass_postencode_update(VP10_COMP *cpi) { rc->vbr_bits_off_target_fast += fast_extra_thresh - rc->projected_frame_size; rc->vbr_bits_off_target_fast = - MIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); + VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); // Fast adaptation of minQ if necessary to use up the extra bits. 
if (rc->avg_frame_bandwidth) { twopass->extend_minq_fast = (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth); } - twopass->extend_minq_fast = MIN(twopass->extend_minq_fast, - minq_adj_limit - twopass->extend_minq); + twopass->extend_minq_fast = VPXMIN( + twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); } else if (rc->vbr_bits_off_target_fast) { - twopass->extend_minq_fast = MIN(twopass->extend_minq_fast, - minq_adj_limit - twopass->extend_minq); + twopass->extend_minq_fast = VPXMIN( + twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); } else { twopass->extend_minq_fast = 0; } diff --git a/vp10/encoder/mbgraph.c b/vp10/encoder/mbgraph.c index 6149670..02ba0e5 100644 --- a/vp10/encoder/mbgraph.c +++ b/vp10/encoder/mbgraph.c @@ -41,7 +41,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, // Further step/diamond searches as necessary int step_param = mv_sf->reduce_first_step_size; - step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2); + step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); vp10_set_mv_search_range(x, ref_mv); diff --git a/vp10/encoder/mcomp.c b/vp10/encoder/mcomp.c index 5720c63..1cfe471 100644 --- a/vp10/encoder/mcomp.c +++ b/vp10/encoder/mcomp.c @@ -37,10 +37,10 @@ void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) { int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL; - col_min = MAX(col_min, (MV_LOW >> 3) + 1); - row_min = MAX(row_min, (MV_LOW >> 3) + 1); - col_max = MIN(col_max, (MV_UPP >> 3) - 1); - row_max = MIN(row_max, (MV_UPP >> 3) - 1); + col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1); + row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1); + col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1); + row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1); // Get intersection of UMV window and valid MV window to reduce # of checks // in diamond search. @@ -57,12 +57,12 @@ void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) { int vp10_init_search_range(int size) { int sr = 0; // Minimum search size no matter what the passed in value. 
- size = MAX(16, size); + size = VPXMAX(16, size); while ((size << sr) < MAX_FULL_PEL_VAL) sr++; - sr = MIN(sr, MAX_MVSEARCH_STEPS - 2); + sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2); return sr; } @@ -297,10 +297,10 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) { int br = bestmv->row * 8; \ int bc = bestmv->col * 8; \ int hstep = 4; \ - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ + const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ + const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ + const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ + const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ int tr = br; \ int tc = bc; \ \ @@ -668,10 +668,10 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, int bc = bestmv->col * 8; int hstep = 4; int iter, round = 3 - forced_stop; - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); + const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); + const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); + const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); + const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); int tr = br; int tc = bc; const MV *search_step = search_step_table; @@ -1500,9 +1500,9 @@ int vp10_fast_hex_search(const MACROBLOCK *x, int use_mvcost, const MV *center_mv, MV *best_mv) { - return vp10_hex_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), - sad_per_bit, do_init_search, cost_list, vfp, use_mvcost, - center_mv, best_mv); + return vp10_hex_search( + x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, + do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); } int vp10_fast_dia_search(const MACROBLOCK *x, @@ -1515,9 +1515,9 @@ int vp10_fast_dia_search(const MACROBLOCK *x, int use_mvcost, const MV *center_mv, MV *best_mv) { - return vp10_bigdia_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), - sad_per_bit, do_init_search, cost_list, vfp, - use_mvcost, center_mv, best_mv); + return vp10_bigdia_search( + x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, + do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); } #undef CHECK_BETTER @@ -1547,10 +1547,10 @@ int vp10_full_range_search_c(const MACROBLOCK *x, best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - start_row = MAX(-range, x->mv_row_min - ref_mv->row); - start_col = MAX(-range, x->mv_col_min - ref_mv->col); - end_row = MIN(range, x->mv_row_max - ref_mv->row); - end_col = MIN(range, x->mv_col_max - ref_mv->col); + start_row = VPXMAX(-range, x->mv_row_min - ref_mv->row); + start_col = VPXMAX(-range, x->mv_col_min - ref_mv->col); + end_row = VPXMIN(range, x->mv_row_max - ref_mv->row); + end_col = VPXMIN(range, x->mv_col_max - ref_mv->col); for (r = start_row; r <= end_row; ++r) { for (c = start_col; c <= end_col; c += 4) { @@ -2021,10 +2021,10 @@ int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = 
&x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + @@ -2054,10 +2054,10 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + @@ -2119,10 +2119,10 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + diff --git a/vp10/encoder/picklpf.c b/vp10/encoder/picklpf.c index 81c428d..ff0f387 100644 --- a/vp10/encoder/picklpf.c +++ b/vp10/encoder/picklpf.c @@ -92,8 +92,8 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi, ss_err[filt_mid] = best_err; while (filter_step > 0) { - const int filt_high = MIN(filt_mid + filter_step, max_filter_level); - const int filt_low = MAX(filt_mid - filter_step, min_filter_level); + const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level); + const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level); // Bias against raising loop filter in favor of lowering it. 
int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; diff --git a/vp10/encoder/ratectrl.c b/vp10/encoder/ratectrl.c index c40d30c..477a3f5 100644 --- a/vp10/encoder/ratectrl.c +++ b/vp10/encoder/ratectrl.c @@ -106,8 +106,7 @@ static int kf_low = 400; static int get_minq_index(double maxq, double x3, double x2, double x1, vpx_bit_depth_t bit_depth) { int i; - const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq, - maxq); + const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, maxq); // Special case handling to deal with the step from q2.0 // down to lossless mode represented by q 1.0. @@ -192,15 +191,15 @@ int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs, vpx_bit_depth_t bit_depth) { const int bpm = (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth)); - return MAX(FRAME_OVERHEAD_BITS, - (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); + return VPXMAX(FRAME_OVERHEAD_BITS, + (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); } int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) { const RATE_CONTROL *rc = &cpi->rc; const VP10EncoderConfig *oxcf = &cpi->oxcf; - const int min_frame_target = MAX(rc->min_frame_bandwidth, - rc->avg_frame_bandwidth >> 5); + const int min_frame_target = VPXMAX(rc->min_frame_bandwidth, + rc->avg_frame_bandwidth >> 5); if (target < min_frame_target) target = min_frame_target; if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) { @@ -216,7 +215,7 @@ int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) { if (oxcf->rc_max_inter_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } return target; } @@ -227,7 +226,7 @@ int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) { if (oxcf->rc_max_intra_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth; @@ -250,7 +249,8 @@ static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) { lrc->bits_off_target += bits_off_for_this_layer; // Clip buffer level to maximum buffer size for the layer. - lrc->bits_off_target = MIN(lrc->bits_off_target, lrc->maximum_buffer_size); + lrc->bits_off_target = + VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size); lrc->buffer_level = lrc->bits_off_target; } } @@ -268,7 +268,7 @@ static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) { } // Clip the buffer level to the maximum specified buffer size. 
- rc->bits_off_target = MIN(rc->bits_off_target, rc->maximum_buffer_size); + rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); rc->buffer_level = rc->bits_off_target; if (is_one_pass_cbr_svc(cpi)) { @@ -287,8 +287,8 @@ int vp10_rc_get_default_min_gf_interval( if (factor <= factor_safe) return default_interval; else - return MAX(default_interval, - (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5)); + return VPXMAX(default_interval, + (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5)); // Note this logic makes: // 4K24: 5 // 4K30: 6 @@ -296,9 +296,9 @@ int vp10_rc_get_default_min_gf_interval( } int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) { - int interval = MIN(MAX_GF_INTERVAL, (int)(framerate * 0.75)); + int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75)); interval += (interval & 0x01); // Round to even value - return MAX(interval, min_gf_interval); + return VPXMAX(interval, min_gf_interval); } void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) { @@ -478,7 +478,7 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) { // More heavily damped adjustment used if we have been oscillating either side // of target. adjustment_limit = 0.25 + - 0.5 * MIN(1, fabs(log10(0.01 * correction_factor))); + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor))); cpi->rc.q_2_frame = cpi->rc.q_1_frame; cpi->rc.q_1_frame = cm->base_qindex; @@ -558,8 +558,8 @@ int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame, if (cpi->oxcf.rc_mode == VPX_CBR && (cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) && cpi->rc.q_1_frame != cpi->rc.q_2_frame) { - q = clamp(q, MIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame), - MAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame)); + q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame), + VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame)); } return q; } @@ -617,7 +617,7 @@ static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) { : rc->last_q[INTER_FRAME] * 2; } } - return MIN(active_worst_quality, rc->worst_quality); + return VPXMIN(active_worst_quality, rc->worst_quality); } // Adjust active_worst_quality level based on buffer level. @@ -643,10 +643,10 @@ static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) { // So for first few frames following key, the qp of that key frame is weighted // into the active_worst_quality setting. ambient_qp = (cm->current_video_frame < 5) ? - MIN(rc->avg_frame_qindex[INTER_FRAME], rc->avg_frame_qindex[KEY_FRAME]) : - rc->avg_frame_qindex[INTER_FRAME]; - active_worst_quality = MIN(rc->worst_quality, - ambient_qp * 5 / 4); + VPXMIN(rc->avg_frame_qindex[INTER_FRAME], + rc->avg_frame_qindex[KEY_FRAME]) : + rc->avg_frame_qindex[INTER_FRAME]; + active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4); if (rc->buffer_level > rc->optimal_buffer_level) { // Adjust down. // Maximum limit for down adjustment, ~30%. 
@@ -699,7 +699,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi, int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else if (cm->current_video_frame > 0) { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; @@ -833,7 +833,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi, int delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; @@ -1002,21 +1002,21 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi, int qindex; if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - qindex = MIN(rc->last_kf_qindex, rc->last_boosted_qindex); + qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); active_best_quality = qindex; last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth); - active_worst_quality = MIN(qindex + delta_qindex, active_worst_quality); - + active_worst_quality = + VPXMIN(qindex + delta_qindex, active_worst_quality); } else { qindex = rc->last_boosted_qindex; last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp10_compute_qdelta(rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } } else { // Not forced keyframe. @@ -1116,8 +1116,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi, (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) { int qdelta = vp10_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index], active_worst_quality); - active_worst_quality = MAX(active_worst_quality + qdelta, - active_best_quality); + active_worst_quality = VPXMAX(active_worst_quality + qdelta, + active_best_quality); } #endif @@ -1126,7 +1126,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi, int qdelta = vp10_compute_qdelta_by_rate(rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth); - active_best_quality = MAX(active_best_quality + qdelta, rc->best_quality); + active_best_quality = + VPXMAX(active_best_quality + qdelta, rc->best_quality); } active_best_quality = clamp(active_best_quality, @@ -1141,7 +1142,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi, rc->this_key_frame_forced) { // If static since last kf use better of last boosted and last kf q. if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - q = MIN(rc->last_kf_qindex, rc->last_boosted_qindex); + q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); } else { q = rc->last_boosted_qindex; } @@ -1195,9 +1196,9 @@ void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, // For very small rate targets where the fractional adjustment // may be tiny make sure there is at least a minimum range. 
const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100; - *frame_under_shoot_limit = MAX(frame_target - tolerance - 200, 0); - *frame_over_shoot_limit = MIN(frame_target + tolerance + 200, - cpi->rc.max_frame_bandwidth); + *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0); + *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200, + cpi->rc.max_frame_bandwidth); } } @@ -1436,7 +1437,8 @@ static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { const SVC *const svc = &cpi->svc; const int64_t diff = rc->optimal_buffer_level - rc->buffer_level; const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100; - int min_frame_target = MAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS); + int min_frame_target = + VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS); int target; if (oxcf->gf_cbr_boost_pct) { @@ -1458,23 +1460,24 @@ static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { svc->temporal_layer_id, svc->number_temporal_layers); const LAYER_CONTEXT *lc = &svc->layer_context[layer]; target = lc->avg_frame_size; - min_frame_target = MAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); + min_frame_target = VPXMAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); } if (diff > 0) { // Lower the target bandwidth for this frame. - const int pct_low = (int)MIN(diff / one_pct_bits, oxcf->under_shoot_pct); + const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct); target -= (target * pct_low) / 200; } else if (diff < 0) { // Increase the target bandwidth for this frame. - const int pct_high = (int)MIN(-diff / one_pct_bits, oxcf->over_shoot_pct); + const int pct_high = + (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct); target += (target * pct_high) / 200; } if (oxcf->rc_max_inter_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } - return MAX(min_frame_target, target); + return VPXMAX(min_frame_target, target); } static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { @@ -1496,7 +1499,7 @@ static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) { const LAYER_CONTEXT *lc = &svc->layer_context[layer]; framerate = lc->framerate; } - kf_boost = MAX(kf_boost, (int)(2 * framerate - 16)); + kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16)); if (rc->frames_since_key < framerate / 2) { kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2)); @@ -1704,7 +1707,7 @@ void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi, rc->max_gf_interval = rc->static_scene_max_gf_interval; // Clamp min to max - rc->min_gf_interval = MIN(rc->min_gf_interval, rc->max_gf_interval); + rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval); } void vp10_rc_update_framerate(VP10_COMP *cpi) { @@ -1717,7 +1720,8 @@ void vp10_rc_update_framerate(VP10_COMP *cpi) { rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100); - rc->min_frame_bandwidth = MAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); + rc->min_frame_bandwidth = + VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); // A maximum bitrate for a frame is defined. // The baseline for this aligns with HW implementations that @@ -1728,8 +1732,8 @@ void vp10_rc_update_framerate(VP10_COMP *cpi) { // specifies lossless encode. 
vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) / 100); - rc->max_frame_bandwidth = MAX(MAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), - vbr_max_bits); + rc->max_frame_bandwidth = + VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits); vp10_rc_set_gf_interval_range(cpi, rc); } @@ -1767,12 +1771,12 @@ static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) { // Dont do it for kf,arf,gf or overlay frames. if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref && rc->vbr_bits_off_target_fast) { - int one_frame_bits = MAX(rc->avg_frame_bandwidth, *this_frame_target); + int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target); int fast_extra_bits; - fast_extra_bits = - (int)MIN(rc->vbr_bits_off_target_fast, one_frame_bits); - fast_extra_bits = (int)MIN(fast_extra_bits, - MAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8)); + fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits); + fast_extra_bits = (int)VPXMIN( + fast_extra_bits, + VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8)); *this_frame_target += (int)fast_extra_bits; rc->vbr_bits_off_target_fast -= fast_extra_bits; } diff --git a/vp10/encoder/rd.c b/vp10/encoder/rd.c index 425a10e..00e7a94 100644 --- a/vp10/encoder/rd.c +++ b/vp10/encoder/rd.c @@ -172,7 +172,7 @@ int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) { if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) { const GF_GROUP *const gf_group = &cpi->twopass.gf_group; const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index]; - const int boost_index = MIN(15, (cpi->rc.gfu_boost / 100)); + const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100)); rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7; rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7); @@ -204,7 +204,7 @@ static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) { q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; #endif // CONFIG_VP9_HIGHBITDEPTH // TODO(debargha): Adjust the function below. - return MAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8); + return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8); } void vp10_initialize_me_consts(VP10_COMP *cpi, MACROBLOCK *x, int qindex) { @@ -400,7 +400,7 @@ void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2, static const uint32_t MAX_XSQ_Q10 = 245727; const uint64_t xsq_q10_64 = (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var; - const int xsq_q10 = (int)MIN(xsq_q10_64, MAX_XSQ_Q10); + const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10); model_rd_norm(xsq_q10, &r_q10, &d_q10); *rate = ((r_q10 << n_log2) + 2) >> 2; *dist = (var * (int64_t)d_q10 + 512) >> 10; @@ -481,7 +481,7 @@ void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, continue; fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3; fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3; - max_mv = MAX(max_mv, MAX(abs(this_mv->row), abs(this_mv->col)) >> 3); + max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3); if (fp_row ==0 && fp_col == 0 && zero_seen) continue; @@ -626,16 +626,15 @@ void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh, const int top_mode = bsize < BLOCK_8X8 ? 
MAX_REFS : MAX_MODES; int mode; for (mode = 0; mode < top_mode; ++mode) { - const BLOCK_SIZE min_size = MAX(bsize - 1, BLOCK_4X4); - const BLOCK_SIZE max_size = MIN(bsize + 2, BLOCK_64X64); + const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4); + const BLOCK_SIZE max_size = VPXMIN(bsize + 2, BLOCK_64X64); BLOCK_SIZE bs; for (bs = min_size; bs <= max_size; ++bs) { int *const fact = &factor_buf[bs][mode]; if (mode == best_mode_index) { *fact -= (*fact >> 4); } else { - *fact = MIN(*fact + RD_THRESH_INC, - rd_thresh * RD_THRESH_MAX_FACT); + *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT); } } } diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c index 62b585a..90a7aa3 100644 --- a/vp10/encoder/rdopt.c +++ b/vp10/encoder/rdopt.c @@ -192,8 +192,8 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, const int64_t ac_thr = p->quant_thred[1] >> shift; // The low thresholds are used to measure if the prediction errors are // low enough so that we can skip the mode search. - const int64_t low_dc_thr = MIN(50, dc_thr >> 2); - const int64_t low_ac_thr = MIN(80, ac_thr >> 2); + const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2); + const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2); int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]); int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]); int idx, idy; @@ -505,7 +505,7 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, if (tx_size != TX_32X32) dc_correct >>= 2; - dist = MAX(0, sse - dc_correct); + dist = VPXMAX(0, sse - dc_correct); } } else { // SKIP_TXFM_AC_DC @@ -531,7 +531,7 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse); // TODO(jingning): temporarily enabled only for luma component - rd = MIN(rd1, rd2); + rd = VPXMIN(rd1, rd2); if (plane == 0) x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless); @@ -599,7 +599,7 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - mbmi->tx_size = MIN(max_tx_size, largest_tx_size); + mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size); txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs, @@ -639,8 +639,8 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, start_tx = max_tx_size; end_tx = 0; } else { - TX_SIZE chosen_tx_size = MIN(max_tx_size, - tx_mode_to_biggest_tx_size[cm->tx_mode]); + TX_SIZE chosen_tx_size = VPXMIN(max_tx_size, + tx_mode_to_biggest_tx_size[cm->tx_mode]); start_tx = chosen_tx_size; end_tx = chosen_tx_size; } @@ -1389,7 +1389,7 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi, cpi->sf.use_fast_coef_costing); rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2); rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2); - rd = MIN(rd1, rd2); + rd = VPXMIN(rd1, rd2); if (rd >= best_yrd) return INT64_MAX; } @@ -1808,7 +1808,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x, if (i == 0) max_mv = x->max_mv_context[mbmi->ref_frame[0]]; else - max_mv = MAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; + max_mv = + VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's @@ -1826,7 +1827,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP10_COMP *cpi, MACROBLOCK *x, if 
(cpi->sf.adaptive_motion_search) { mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3; mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3; - step_param = MAX(step_param, 8); + step_param = VPXMAX(step_param, 8); } // adjust src pointer for this block @@ -2231,7 +2232,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x, vp10_set_mv_search_range(x, &ref_mv); // Work out the size of the first step in the mv step search. - // 0 here is maximum length first step. 1 is MAX >> 1 etc. + // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc. if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's // max mv magnitude and that based on the best ref mvs of the current @@ -2243,9 +2244,10 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x, } if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) { - int boffset = 2 * (b_width_log2_lookup[BLOCK_64X64] - - MIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize])); - step_param = MAX(step_param, boffset); + int boffset = + 2 * (b_width_log2_lookup[BLOCK_64X64] - + VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize])); + step_param = VPXMAX(step_param, boffset); } if (cpi->sf.adaptive_motion_search) { @@ -2466,7 +2468,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, // motion field, where the distortion gain for a single block may not // be enough to overcome the cost of a new mv. if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) { - *rate2 += MAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1); + *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1); } else { *rate2 += rate_mv; } @@ -2503,10 +2505,10 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, // initiation of a motion field. 
if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv, refs[0])) { - *rate2 += MIN(cost_mv_ref(cpi, this_mode, - mbmi_ext->mode_context[refs[0]]), - cost_mv_ref(cpi, NEARESTMV, - mbmi_ext->mode_context[refs[0]])); + *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, + mbmi_ext->mode_context[refs[0]]), + cost_mv_ref(cpi, NEARESTMV, + mbmi_ext->mode_context[refs[0]])); } else { *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]); } @@ -2548,10 +2550,10 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum); filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); if (cm->interp_filter == SWITCHABLE) rd += rs_rd; - *mask_filter = MAX(*mask_filter, rd); + *mask_filter = VPXMAX(*mask_filter, rd); } else { int rate_sum = 0; int64_t dist_sum = 0; @@ -2581,10 +2583,10 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum); filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); if (cm->interp_filter == SWITCHABLE) rd += rs_rd; - *mask_filter = MAX(*mask_filter, rd); + *mask_filter = VPXMAX(*mask_filter, rd); if (i == 0 && intpel_mv) { tmp_rate_sum = rate_sum; @@ -2695,7 +2697,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x, *distortion += distortion_y; rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion); - rdcosty = MIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse)); + rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse)); if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv, &sseuv, bsize, ref_best_rd - rdcosty)) { @@ -2760,7 +2762,7 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, pd[1].subsampling_x, pd[1].subsampling_y); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, MAX(BLOCK_8X8, bsize), + &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size); if (y_skip && uv_skip) { @@ -2827,12 +2829,12 @@ static void rd_variance_adjustment(VP10_COMP *cpi, // to a predictor with a low spatial complexity compared to the source. if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) && (source_variance > recon_variance)) { - var_factor = MIN(absvar_diff, MIN(VLOW_ADJ_MAX, var_error)); + var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error)); // A second possible case of interest is where the source variance // is very low and we wish to discourage false texture or motion trails. 
} else if ((source_variance < (LOW_VAR_THRESH >> 1)) && (recon_variance > source_variance)) { - var_factor = MIN(absvar_diff, MIN(VHIGH_ADJ_MAX, var_error)); + var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error)); } *this_rd += (*this_rd * var_factor) / 100; } @@ -2862,7 +2864,7 @@ int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) { top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2); bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2); - bottom_edge = MAX(top_edge, bottom_edge); + bottom_edge = VPXMAX(top_edge, bottom_edge); } if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) || @@ -2889,7 +2891,7 @@ int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) { left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2); right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2); - right_edge = MAX(left_edge, right_edge); + right_edge = VPXMAX(left_edge, right_edge); } if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) || @@ -3136,7 +3138,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, } if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))) + (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) continue; if (mode_skip_mask[ref_frame] & (1 << this_mode)) @@ -3150,10 +3152,10 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, continue; if (sf->motion_field_mode_search) { - const int mi_width = MIN(num_8x8_blocks_wide_lookup[bsize], - tile_info->mi_col_end - mi_col); - const int mi_height = MIN(num_8x8_blocks_high_lookup[bsize], - tile_info->mi_row_end - mi_row); + const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize], + tile_info->mi_col_end - mi_col); + const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize], + tile_info->mi_row_end - mi_row); const int bsl = mi_width_log2_lookup[bsize]; int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl) + get_chessboard_index(cm->current_video_frame)) & 0x1; @@ -3371,9 +3373,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, if (!disable_skip && ref_frame == INTRA_FRAME) { for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); + best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); } // Did this mode help.. i.e. is it the new best mode @@ -3472,7 +3474,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, adj_rd = filter_cache[i] - ref; adj_rd += this_rd; - best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); } } } @@ -3815,7 +3817,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, } if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))) + (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) continue; // Test best rd so far against threshold for trying this mode. 
@@ -3970,12 +3972,11 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); filter_cache[switchable_filter_index] = tmp_rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], - tmp_rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd); if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd; - mask_filter = MAX(mask_filter, tmp_rd); + mask_filter = VPXMAX(mask_filter, tmp_rd); newbest = (tmp_rd < tmp_best_rd); if (newbest) { @@ -4053,8 +4054,8 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred); tmp_best_rdu = best_rd - - MIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2), - RDCOST(x->rdmult, x->rddiv, 0, total_sse)); + VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2), + RDCOST(x->rdmult, x->rddiv, 0, total_sse)); if (tmp_best_rdu > 0) { // If even the 'Y' rd value of split is higher than best so far @@ -4114,9 +4115,9 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, if (!disable_skip && ref_frame == INTRA_FRAME) { for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); + best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); } // Did this mode help.. i.e. is it the new best mode @@ -4215,7 +4216,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, adj_rd = filter_cache[i] - ref; adj_rd += this_rd; - best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); } } diff --git a/vp10/encoder/speed_features.c b/vp10/encoder/speed_features.c index b3a617a..3fb56dd 100644 --- a/vp10/encoder/speed_features.c +++ b/vp10/encoder/speed_features.c @@ -49,7 +49,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi, VP10_COMMON *const cm = &cpi->common; if (speed >= 1) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->partition_search_breakout_dist_thr = (1 << 23); @@ -60,7 +60,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 2) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->adaptive_pred_interp_filter = 0; @@ -75,7 +75,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 3) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = DISABLE_ALL_SPLIT; sf->schedule_mode_search = cm->base_qindex < 220 ? 1 : 0; sf->partition_search_breakout_dist_thr = (1 << 25); @@ -99,7 +99,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 4) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->partition_search_breakout_dist_thr = (1 << 26); } else { sf->partition_search_breakout_dist_thr = (1 << 24); @@ -215,7 +215,7 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi, VP10_COMMON *const cm = &cpi->common; if (speed >= 1) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? 
DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { @@ -224,7 +224,7 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 2) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { @@ -233,7 +233,7 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 5) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->partition_search_breakout_dist_thr = (1 << 25); } else { sf->partition_search_breakout_dist_thr = (1 << 23); @@ -241,7 +241,7 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi, } if (speed >= 7) { - sf->encode_breakout_thresh = (MIN(cm->width, cm->height) >= 720) ? + sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300; } } diff --git a/vp10/encoder/svc_layercontext.c b/vp10/encoder/svc_layercontext.c index 6c73256..49d46ee 100644 --- a/vp10/encoder/svc_layercontext.c +++ b/vp10/encoder/svc_layercontext.c @@ -141,8 +141,8 @@ void vp10_update_layer_context_change_config(VP10_COMP *const cpi, lrc->maximum_buffer_size = (int64_t)(rc->maximum_buffer_size * bitrate_alloc); lrc->bits_off_target = - MIN(lrc->bits_off_target, lrc->maximum_buffer_size); - lrc->buffer_level = MIN(lrc->buffer_level, lrc->maximum_buffer_size); + VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size); + lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size); lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[tl]; lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate); lrc->max_frame_bandwidth = rc->max_frame_bandwidth; @@ -173,9 +173,9 @@ void vp10_update_layer_context_change_config(VP10_COMP *const cpi, (int64_t)(rc->optimal_buffer_level * bitrate_alloc); lrc->maximum_buffer_size = (int64_t)(rc->maximum_buffer_size * bitrate_alloc); - lrc->bits_off_target = MIN(lrc->bits_off_target, - lrc->maximum_buffer_size); - lrc->buffer_level = MIN(lrc->buffer_level, lrc->maximum_buffer_size); + lrc->bits_off_target = VPXMIN(lrc->bits_off_target, + lrc->maximum_buffer_size); + lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size); // Update framerate-related quantities. 
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) { lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[layer]; diff --git a/vp10/encoder/temporal_filter.c b/vp10/encoder/temporal_filter.c index 1341dc4..dba7891 100644 --- a/vp10/encoder/temporal_filter.c +++ b/vp10/encoder/temporal_filter.c @@ -242,7 +242,7 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi, xd->plane[0].pre[0].stride = stride; step_param = mv_sf->reduce_first_step_size; - step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2); + step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); // Ignore mv costing by sending NULL pointer instead of cost arrays vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, diff --git a/vp10/vp10_dx_iface.c b/vp10/vp10_dx_iface.c index 1848103..0ff64df 100644 --- a/vp10/vp10_dx_iface.c +++ b/vp10/vp10_dx_iface.c @@ -183,7 +183,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, si->w = si->h = 0; if (decrypt_cb) { - data_sz = MIN(sizeof(clear_buffer), data_sz); + data_sz = VPXMIN(sizeof(clear_buffer), data_sz); decrypt_cb(decrypt_state, data, clear_buffer, data_sz); data = clear_buffer; } diff --git a/vp9/common/vp9_blockd.h b/vp9/common/vp9_blockd.h index d776b44..b757c01 100644 --- a/vp9/common/vp9_blockd.h +++ b/vp9/common/vp9_blockd.h @@ -235,7 +235,7 @@ static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize, return TX_4X4; } else { const BLOCK_SIZE plane_bsize = ss_size_lookup[bsize][xss][yss]; - return MIN(y_tx_size, max_txsize_lookup[plane_bsize]); + return VPXMIN(y_tx_size, max_txsize_lookup[plane_bsize]); } } diff --git a/vp9/common/vp9_common_data.c b/vp9/common/vp9_common_data.c index 0bf7cbc..ca7f4ad 100644 --- a/vp9/common/vp9_common_data.c +++ b/vp9/common/vp9_common_data.c @@ -27,7 +27,7 @@ const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] = const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] = {1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8}; -// MIN(3, MIN(b_width_log2(bsize), b_height_log2(bsize))) +// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize))) const uint8_t size_group_lookup[BLOCK_SIZES] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3}; diff --git a/vp9/common/vp9_loopfilter.c b/vp9/common/vp9_loopfilter.c index 0915918..08d55c6 100644 --- a/vp9/common/vp9_loopfilter.c +++ b/vp9/common/vp9_loopfilter.c @@ -1588,7 +1588,7 @@ void vp9_loop_filter_frame(YV12_BUFFER_CONFIG *frame, if (partial_frame && cm->mi_rows > 8) { start_mi_row = cm->mi_rows >> 1; start_mi_row &= 0xfffffff8; - mi_rows_to_filter = MAX(cm->mi_rows / 8, 8); + mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); } end_mi_row = start_mi_row + mi_rows_to_filter; vp9_loop_filter_frame_init(cm, frame_filter_level); diff --git a/vp9/common/vp9_postproc.c b/vp9/common/vp9_postproc.c index 71ab861..6c87cee 100644 --- a/vp9/common/vp9_postproc.c +++ b/vp9/common/vp9_postproc.c @@ -625,7 +625,7 @@ static void swap_mi_and_prev_mi(VP9_COMMON *cm) { int vp9_post_proc_frame(struct VP9Common *cm, YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *ppflags) { - const int q = MIN(105, cm->lf.filter_level * 2); + const int q = VPXMIN(105, cm->lf.filter_level * 2); const int flags = ppflags->post_proc_flag; YV12_BUFFER_CONFIG *const ppbuf = &cm->post_proc_buffer; struct postproc_state *const ppstate = &cm->postproc_state; diff --git a/vp9/common/vp9_pred_common.h b/vp9/common/vp9_pred_common.h index 67b95db..d56eacf 100644 --- a/vp9/common/vp9_pred_common.h +++ b/vp9/common/vp9_pred_common.h @@ -24,14 +24,14 @@ static INLINE int 
get_segment_id(const VP9_COMMON *cm, const int mi_offset = mi_row * cm->mi_cols + mi_col; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = MIN(cm->mi_cols - mi_col, bw); - const int ymis = MIN(cm->mi_rows - mi_row, bh); + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); int x, y, segment_id = MAX_SEGMENTS; for (y = 0; y < ymis; ++y) for (x = 0; x < xmis; ++x) - segment_id = MIN(segment_id, - segment_ids[mi_offset + y * cm->mi_cols + x]); + segment_id = + VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); return segment_id; diff --git a/vp9/common/vp9_thread_common.c b/vp9/common/vp9_thread_common.c index 6b11c93..2e6285a 100644 --- a/vp9/common/vp9_thread_common.c +++ b/vp9/common/vp9_thread_common.c @@ -165,7 +165,7 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, // Decoder may allocate more threads than number of tiles based on user's // input. const int tile_cols = 1 << cm->log2_tile_cols; - const int num_workers = MIN(nworkers, tile_cols); + const int num_workers = VPXMIN(nworkers, tile_cols); int i; if (!lf_sync->sync_range || sb_rows != lf_sync->rows || @@ -229,7 +229,7 @@ void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, if (partial_frame && cm->mi_rows > 8) { start_mi_row = cm->mi_rows >> 1; start_mi_row &= 0xfffffff8; - mi_rows_to_filter = MAX(cm->mi_rows / 8, 8); + mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); } end_mi_row = start_mi_row + mi_rows_to_filter; vp9_loop_filter_frame_init(cm, frame_filter_level); diff --git a/vp9/common/vp9_tile_common.c b/vp9/common/vp9_tile_common.c index 7a20e0a..f276412 100644 --- a/vp9/common/vp9_tile_common.c +++ b/vp9/common/vp9_tile_common.c @@ -18,7 +18,7 @@ static int get_tile_offset(int idx, int mis, int log2) { const int sb_cols = mi_cols_aligned_to_sb(mis) >> MI_BLOCK_SIZE_LOG2; const int offset = ((idx * sb_cols) >> log2) << MI_BLOCK_SIZE_LOG2; - return MIN(offset, mis); + return VPXMIN(offset, mis); } void vp9_tile_set_row(TileInfo *tile, const VP9_COMMON *cm, int row) { diff --git a/vp9/decoder/vp9_decodeframe.c b/vp9/decoder/vp9_decodeframe.c index fb7b3b8..f9f991d 100644 --- a/vp9/decoder/vp9_decodeframe.c +++ b/vp9/decoder/vp9_decodeframe.c @@ -658,7 +658,7 @@ static void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd, // pixels of each superblock row can be changed by next superblock row. if (pbi->frame_parallel_decode) vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); // Skip border extension if block is inside the frame. if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 || @@ -686,7 +686,7 @@ static void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd, if (pbi->frame_parallel_decode) { const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS; vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf, - MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1)); + VPXMAX(0, (y1 + 7)) << (plane == 0 ? 
0 : 1)); } } #if CONFIG_VP9_HIGHBITDEPTH @@ -757,8 +757,8 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi, static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi, int n4_wl, int n4_hl) { // get minimum log2 num4x4s dimension - const int x = MIN(n4_wl, n4_hl); - return MIN(mbmi->tx_size, x); + const int x = VPXMIN(n4_wl, n4_hl); + return VPXMIN(mbmi->tx_size, x); } static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) { @@ -819,8 +819,8 @@ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd, const int less8x8 = bsize < BLOCK_8X8; const int bw = 1 << (bwl - 1); const int bh = 1 << (bhl - 1); - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - const int y_mis = MIN(bh, cm->mi_rows - mi_row); + const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); + const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis, bwl, bhl); @@ -1603,7 +1603,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols); const int tile_cols = 1 << cm->log2_tile_cols; const int tile_rows = 1 << cm->log2_tile_rows; - const int num_workers = MIN(pbi->max_threads & ~1, tile_cols); + const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols); TileBuffer tile_buffers[1][1 << 6]; int n; int final_worker = -1; @@ -1670,7 +1670,7 @@ static const uint8_t *decode_tiles_mt(VP9Decoder *pbi, int group_start = 0; while (group_start < tile_cols) { const TileBuffer largest = tile_buffers[0][group_start]; - const int group_end = MIN(group_start + num_workers, tile_cols) - 1; + const int group_end = VPXMIN(group_start + num_workers, tile_cols) - 1; memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1, (group_end - group_start) * sizeof(tile_buffers[0][0])); tile_buffers[0][group_end] = largest; @@ -2102,7 +2102,7 @@ static struct vpx_read_bit_buffer *init_read_bit_buffer( rb->error_handler = error_handler; rb->error_handler_data = &pbi->common; if (pbi->decrypt_cb) { - const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data); + const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data); pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n); rb->bit_buffer = clear_data; rb->bit_buffer_end = clear_data + n; diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c index 33818a9..9db88a4 100644 --- a/vp9/decoder/vp9_decodemv.c +++ b/vp9/decoder/vp9_decodemv.c @@ -87,7 +87,7 @@ static TX_SIZE read_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd, if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8) return read_selected_tx_size(cm, xd, max_tx_size, r); else - return MIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]); + return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]); } static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids, @@ -96,8 +96,8 @@ static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids, for (y = 0; y < y_mis; y++) for (x = 0; x < x_mis; x++) - segment_id = MIN(segment_id, - segment_ids[mi_offset + y * cm->mi_cols + x]); + segment_id = + VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]); assert(segment_id >= 0 && segment_id < MAX_SEGMENTS); return segment_id; @@ -156,8 +156,8 @@ static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd, const int bh = xd->plane[0].n4_h >> 1; // TODO(slavarnway): move x_mis, y_mis into xd ????? 
- const int x_mis = MIN(cm->mi_cols - mi_col, bw); - const int y_mis = MIN(cm->mi_rows - mi_row, bh); + const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); + const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); if (!seg->enabled) return 0; // Default for disabled segmentation @@ -212,8 +212,8 @@ static void read_intra_frame_mode_info(VP9_COMMON *const cm, const int bh = xd->plane[0].n4_h >> 1; // TODO(slavarnway): move x_mis, y_mis into xd ????? - const int x_mis = MIN(cm->mi_cols - mi_col, bw); - const int y_mis = MIN(cm->mi_rows - mi_row, bh); + const int x_mis = VPXMIN(cm->mi_cols - mi_col, bw); + const int y_mis = VPXMIN(cm->mi_rows - mi_row, bh); mbmi->segment_id = read_intra_segment_id(cm, mi_offset, x_mis, y_mis, r); mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); diff --git a/vp9/encoder/vp9_aq_complexity.c b/vp9/encoder/vp9_aq_complexity.c index 15f227f..93ea270 100644 --- a/vp9/encoder/vp9_aq_complexity.c +++ b/vp9/encoder/vp9_aq_complexity.c @@ -117,8 +117,8 @@ void vp9_caq_select_segment(VP9_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, const int mi_offset = mi_row * cm->mi_cols + mi_col; const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64]; const int bh = num_8x8_blocks_high_lookup[BLOCK_64X64]; - const int xmis = MIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]); - const int ymis = MIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]); + const int xmis = VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[bs]); + const int ymis = VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[bs]); int x, y; int i; unsigned char segment; @@ -136,7 +136,7 @@ void vp9_caq_select_segment(VP9_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs, vpx_clear_system_state(); low_var_thresh = (cpi->oxcf.pass == 2) - ? MAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) + ? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH) : DEFAULT_LV_THRESH; vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col); diff --git a/vp9/encoder/vp9_aq_cyclicrefresh.c b/vp9/encoder/vp9_aq_cyclicrefresh.c index e6b3686..813c339 100644 --- a/vp9/encoder/vp9_aq_cyclicrefresh.c +++ b/vp9/encoder/vp9_aq_cyclicrefresh.c @@ -223,8 +223,8 @@ void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi, CYCLIC_REFRESH *const cr = cpi->cyclic_refresh; const int bw = num_8x8_blocks_wide_lookup[bsize]; const int bh = num_8x8_blocks_high_lookup[bsize]; - const int xmis = MIN(cm->mi_cols - mi_col, bw); - const int ymis = MIN(cm->mi_rows - mi_row, bh); + const int xmis = VPXMIN(cm->mi_cols - mi_col, bw); + const int ymis = VPXMIN(cm->mi_rows - mi_row, bh); const int block_index = mi_row * cm->mi_cols + mi_col; const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist, bsize); @@ -416,10 +416,10 @@ static void cyclic_refresh_update_map(VP9_COMP *const cpi) { assert(mi_col >= 0 && mi_col < cm->mi_cols); bl_index = mi_row * cm->mi_cols + mi_col; // Loop through all 8x8 blocks in superblock and update map. - xmis = MIN(cm->mi_cols - mi_col, - num_8x8_blocks_wide_lookup[BLOCK_64X64]); - ymis = MIN(cm->mi_rows - mi_row, - num_8x8_blocks_high_lookup[BLOCK_64X64]); + xmis = + VPXMIN(cm->mi_cols - mi_col, num_8x8_blocks_wide_lookup[BLOCK_64X64]); + ymis = + VPXMIN(cm->mi_rows - mi_row, num_8x8_blocks_high_lookup[BLOCK_64X64]); for (y = 0; y < ymis; y++) { for (x = 0; x < xmis; x++) { const int bl_index2 = bl_index + y * cm->mi_cols + x; @@ -551,8 +551,9 @@ void vp9_cyclic_refresh_setup(VP9_COMP *const cpi) { // Set a more aggressive (higher) q delta for segment BOOST2. 
qindex_delta = compute_deltaq( - cpi, cm->base_qindex, MIN(CR_MAX_RATE_TARGET_RATIO, - 0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta)); + cpi, cm->base_qindex, + VPXMIN(CR_MAX_RATE_TARGET_RATIO, + 0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta)); cr->qindex_delta[2] = qindex_delta; vp9_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta); diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c index d0de095..502fcd5 100644 --- a/vp9/encoder/vp9_bitstream.c +++ b/vp9/encoder/vp9_bitstream.c @@ -815,7 +815,7 @@ static void encode_segmentation(VP9_COMMON *cm, MACROBLOCKD *xd, static void encode_txfm_probs(VP9_COMMON *cm, vpx_writer *w, FRAME_COUNTS *counts) { // Mode - vpx_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2); + vpx_write_literal(w, VPXMIN(cm->tx_mode, ALLOW_32X32), 2); if (cm->tx_mode >= ALLOW_32X32) vpx_write_bit(w, cm->tx_mode == TX_MODE_SELECT); diff --git a/vp9/encoder/vp9_denoiser.c b/vp9/encoder/vp9_denoiser.c index 5f99285..bf6c533 100644 --- a/vp9/encoder/vp9_denoiser.c +++ b/vp9/encoder/vp9_denoiser.c @@ -120,10 +120,10 @@ int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, adj = adj_val[2]; } if (diff > 0) { - avg[c] = MIN(UINT8_MAX, sig[c] + adj); + avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj); total_adj += adj; } else { - avg[c] = MAX(0, sig[c] - adj); + avg[c] = VPXMAX(0, sig[c] - adj); total_adj -= adj; } } @@ -160,13 +160,13 @@ int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride, // Diff positive means we made positive adjustment above // (in first try/attempt), so now make negative adjustment to bring // denoised signal down. - avg[c] = MAX(0, avg[c] - adj); + avg[c] = VPXMAX(0, avg[c] - adj); total_adj -= adj; } else { // Diff negative means we made negative adjustment above // (in first try/attempt), so now make positive adjustment to bring // denoised signal up. 
- avg[c] = MIN(UINT8_MAX, avg[c] + adj); + avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj); total_adj += adj; } } diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c index 295a751..d02d48c 100644 --- a/vp9/encoder/vp9_encodeframe.c +++ b/vp9/encoder/vp9_encodeframe.c @@ -979,8 +979,8 @@ static void update_state(VP9_COMP *cpi, ThreadData *td, const struct segmentation *const seg = &cm->seg; const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - const int y_mis = MIN(bh, cm->mi_rows - mi_row); + const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); + const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); MV_REF *const frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col; int w, h; @@ -1132,8 +1132,8 @@ static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, mbmi->sb_type = bsize; mbmi->mode = ZEROMV; - mbmi->tx_size = MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[tx_mode]); + mbmi->tx_size = + VPXMIN(max_txsize_lookup[bsize], tx_mode_to_biggest_tx_size[tx_mode]); mbmi->skip = 1; mbmi->uv_mode = DC_PRED; mbmi->ref_frame[0] = LAST_FRAME; @@ -1496,7 +1496,7 @@ static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize, int rows_left, int cols_left, int *bh, int *bw) { if (rows_left <= 0 || cols_left <= 0) { - return MIN(bsize, BLOCK_8X8); + return VPXMIN(bsize, BLOCK_8X8); } else { for (; bsize > 0; bsize -= 3) { *bh = num_8x8_blocks_high_lookup[bsize]; @@ -1672,8 +1672,8 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td, const struct segmentation *const seg = &cm->seg; const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type]; const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type]; - const int x_mis = MIN(bw, cm->mi_cols - mi_col); - const int y_mis = MIN(bh, cm->mi_rows - mi_row); + const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col); + const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row); *(xd->mi[0]) = ctx->mic; *(x->mbmi_ext) = ctx->mbmi_ext; @@ -1741,7 +1741,7 @@ static void encode_b_rt(VP9_COMP *cpi, ThreadData *td, if (cpi->oxcf.noise_sensitivity > 0 && output_enabled && cpi->common.frame_type != KEY_FRAME) { vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col, - MAX(BLOCK_8X8, bsize), ctx); + VPXMAX(BLOCK_8X8, bsize), ctx); } #endif @@ -2133,8 +2133,8 @@ static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8, MODE_INFO *mi = mi_8x8[index+j]; BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0; bs_hist[sb_type]++; - *min_block_size = MIN(*min_block_size, sb_type); - *max_block_size = MAX(*max_block_size, sb_type); + *min_block_size = VPXMIN(*min_block_size, sb_type); + *max_block_size = VPXMAX(*max_block_size, sb_type); } index += xd->mi_stride; } @@ -2211,8 +2211,8 @@ static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile, if (vp9_active_edge_sb(cpi, mi_row, mi_col)) { min_size = BLOCK_4X4; } else { - min_size = MIN(cpi->sf.rd_auto_partition_min_limit, - MIN(min_size, max_size)); + min_size = + VPXMIN(cpi->sf.rd_auto_partition_min_limit, VPXMIN(min_size, max_size)); } // When use_square_partition_only is true, make sure at least one square @@ -2248,8 +2248,8 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, for (idx = 0; idx < mi_width; ++idx) { mi = prev_mi[idy * cm->mi_stride + idx]; bs = mi ? 
mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } } @@ -2258,8 +2258,8 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, for (idy = 0; idy < mi_height; ++idy) { mi = xd->mi[idy * cm->mi_stride - 1]; bs = mi ? mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } @@ -2267,8 +2267,8 @@ static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd, for (idx = 0; idx < mi_width; ++idx) { mi = xd->mi[idx - cm->mi_stride]; bs = mi ? mi->mbmi.sb_type : bsize; - min_size = MIN(min_size, bs); - max_size = MAX(max_size, bs); + min_size = VPXMIN(min_size, bs); + max_size = VPXMAX(max_size, bs); } } @@ -2433,9 +2433,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, int mb_row = mi_row >> 1; int mb_col = mi_col >> 1; int mb_row_end = - MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); + VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); int mb_col_end = - MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); + VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); int r, c; // compute a complexity measure, basically measure inconsistency of motion @@ -2524,9 +2524,9 @@ static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td, int mb_row = mi_row >> 1; int mb_col = mi_col >> 1; int mb_row_end = - MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); + VPXMIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows); int mb_col_end = - MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); + VPXMIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols); int r, c; int skip = 1; @@ -3646,7 +3646,7 @@ static int set_var_thresh_from_histogram(VP9_COMP *cpi) { const int last_stride = cpi->Last_Source->y_stride; // Pick cutoff threshold - const int cutoff = (MIN(cm->width, cm->height) >= 720) ? + const int cutoff = (VPXMIN(cm->width, cm->height) >= 720) ? (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) : (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100); DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]); @@ -3947,7 +3947,7 @@ static void encode_frame_internal(VP9_COMP *cpi) { #endif // If allowed, encoding tiles in parallel with one thread handling one tile. 
- if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1) + if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1) vp9_encode_tiles_mt(cpi); else encode_tiles(cpi); @@ -4162,10 +4162,10 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td, int plane; mbmi->skip = 1; for (plane = 0; plane < MAX_MB_PLANE; ++plane) - vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane); + vp9_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane); if (output_enabled) sum_intra_stats(td->counts, mi); - vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8)); + vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); } else { int ref; const int is_compound = has_second_ref(mbmi); @@ -4178,12 +4178,14 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td, &xd->block_refs[ref]->sf); } if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip) - vp9_build_inter_predictors_sby(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); + vp9_build_inter_predictors_sby(xd, mi_row, mi_col, + VPXMAX(bsize, BLOCK_8X8)); - vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8)); + vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col, + VPXMAX(bsize, BLOCK_8X8)); - vp9_encode_sb(x, MAX(bsize, BLOCK_8X8)); - vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8)); + vp9_encode_sb(x, VPXMAX(bsize, BLOCK_8X8)); + vp9_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8)); } if (output_enabled) { @@ -4197,8 +4199,8 @@ static void encode_superblock(VP9_COMP *cpi, ThreadData *td, TX_SIZE tx_size; // The new intra coding scheme requires no change of transform size if (is_inter_block(&mi->mbmi)) { - tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode], - max_txsize_lookup[bsize]); + tx_size = VPXMIN(tx_mode_to_biggest_tx_size[cm->tx_mode], + max_txsize_lookup[bsize]); } else { tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4; } diff --git a/vp9/encoder/vp9_encodemv.c b/vp9/encoder/vp9_encodemv.c index 7848c93..a4dee73 100644 --- a/vp9/encoder/vp9_encodemv.c +++ b/vp9/encoder/vp9_encodemv.c @@ -216,8 +216,8 @@ void vp9_encode_mv(VP9_COMP* cpi, vpx_writer* w, // If auto_mv_step_size is enabled then keep track of the largest // motion vector component used. if (cpi->sf.mv.auto_mv_step_size) { - unsigned int maxv = MAX(abs(mv->row), abs(mv->col)) >> 3; - cpi->max_mv_magnitude = MAX(maxv, cpi->max_mv_magnitude); + unsigned int maxv = VPXMAX(abs(mv->row), abs(mv->col)) >> 3; + cpi->max_mv_magnitude = VPXMAX(maxv, cpi->max_mv_magnitude); } } diff --git a/vp9/encoder/vp9_encoder.c b/vp9/encoder/vp9_encoder.c index 4654d63..d9ecb9a 100644 --- a/vp9/encoder/vp9_encoder.c +++ b/vp9/encoder/vp9_encoder.c @@ -1490,8 +1490,8 @@ void vp9_change_config(struct VP9_COMP *cpi, const VP9EncoderConfig *oxcf) { // Under a configuration change, where maximum_buffer_size may change, // keep buffer level clipped to the maximum allowed buffer size. - rc->bits_off_target = MIN(rc->bits_off_target, rc->maximum_buffer_size); - rc->buffer_level = MIN(rc->buffer_level, rc->maximum_buffer_size); + rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); + rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size); // Set up frame rate and related parameters rate control values. 
vp9_new_framerate(cpi, cpi->framerate); @@ -2615,7 +2615,7 @@ static int scale_down(VP9_COMP *cpi, int q) { if (rc->frame_size_selector == UNSCALED && q >= rc->rf_level_maxq[gf_group->rf_level[gf_group->index]]) { const int max_size_thresh = (int)(rate_thresh_mult[SCALE_STEP1] - * MAX(rc->this_frame_target, rc->avg_frame_bandwidth)); + * VPXMAX(rc->this_frame_target, rc->avg_frame_bandwidth)); scale = rc->projected_frame_size > max_size_thresh ? 1 : 0; } return scale; @@ -2998,7 +2998,7 @@ static void output_frame_level_debug_stats(VP9_COMP *cpi) { static void set_mv_search_params(VP9_COMP *cpi) { const VP9_COMMON *const cm = &cpi->common; - const unsigned int max_mv_def = MIN(cm->width, cm->height); + const unsigned int max_mv_def = VPXMIN(cm->width, cm->height); // Default based on max resolution. cpi->mv_step_param = vp9_init_search_range(max_mv_def); @@ -3013,8 +3013,8 @@ static void set_mv_search_params(VP9_COMP *cpi) { // Allow mv_steps to correspond to twice the max mv magnitude found // in the previous frame, capped by the default max_mv_magnitude based // on resolution. - cpi->mv_step_param = - vp9_init_search_range(MIN(max_mv_def, 2 * cpi->max_mv_magnitude)); + cpi->mv_step_param = vp9_init_search_range( + VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude)); } cpi->max_mv_magnitude = 0; } @@ -3414,7 +3414,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi, // Adjust Q q = (int)((q * high_err_target) / kf_err); - q = MIN(q, (q_high + q_low) >> 1); + q = VPXMIN(q, (q_high + q_low) >> 1); } else if (kf_err < low_err_target && rc->projected_frame_size >= frame_under_shoot_limit) { // The key frame is much better than the previous frame @@ -3423,7 +3423,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi, // Adjust Q q = (int)((q * low_err_target) / kf_err); - q = MIN(q, (q_high + q_low + 1) >> 1); + q = VPXMIN(q, (q_high + q_low + 1) >> 1); } // Clamp Q to upper and lower limits: @@ -3432,7 +3432,7 @@ static void encode_with_recode_loop(VP9_COMP *cpi, loop = q != last_q; } else if (recode_loop_test( cpi, frame_over_shoot_limit, frame_under_shoot_limit, - q, MAX(q_high, top_index), bottom_index)) { + q, VPXMAX(q_high, top_index), bottom_index)) { // Is the projected frame size out of range and are we allowed // to attempt to recode. int last_q = q; @@ -3474,12 +3474,12 @@ static void encode_with_recode_loop(VP9_COMP *cpi, vp9_rc_update_rate_correction_factors(cpi); q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, MAX(q_high, top_index)); + bottom_index, VPXMAX(q_high, top_index)); while (q < q_low && retries < 10) { vp9_rc_update_rate_correction_factors(cpi); q = vp9_rc_regulate_q(cpi, rc->this_frame_target, - bottom_index, MAX(q_high, top_index)); + bottom_index, VPXMAX(q_high, top_index)); retries++; } } @@ -4053,8 +4053,8 @@ static void adjust_frame_rate(VP9_COMP *cpi, // Average this frame's rate into the last second's average // frame rate. If we haven't seen 1 second yet, then average // over the whole interval seen. 
- const double interval = MIN((double)(source->ts_end - - cpi->first_time_stamp_ever), 10000000.0); + const double interval = VPXMIN( + (double)(source->ts_end - cpi->first_time_stamp_ever), 10000000.0); double avg_duration = 10000000.0 / cpi->framerate; avg_duration *= (interval - avg_duration + this_duration); avg_duration /= interval; @@ -4118,7 +4118,7 @@ static void adjust_image_stat(double y, double u, double v, double all, s->stat[U] += u; s->stat[V] += v; s->stat[ALL] += all; - s->worst = MIN(s->worst, all); + s->worst = VPXMIN(s->worst, all); } #endif // CONFIG_INTERNAL_STATS @@ -4448,7 +4448,7 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, frame_ssim2 = vpx_calc_ssim(orig, recon, &weight); #endif // CONFIG_VP9_HIGHBITDEPTH - cpi->worst_ssim= MIN(cpi->worst_ssim, frame_ssim2); + cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2); cpi->summed_quality += frame_ssim2 * weight; cpi->summed_weights += weight; @@ -4485,7 +4485,8 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, cpi->Source->y_buffer, cpi->Source->y_stride, cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride, cpi->Source->y_width, cpi->Source->y_height); - cpi->worst_blockiness = MAX(cpi->worst_blockiness, frame_blockiness); + cpi->worst_blockiness = + VPXMAX(cpi->worst_blockiness, frame_blockiness); cpi->total_blockiness += frame_blockiness; } } @@ -4505,8 +4506,8 @@ int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags, double consistency = vpx_sse_to_psnr(samples, peak, (double)cpi->total_inconsistency); if (consistency > 0.0) - cpi->worst_consistency = MIN(cpi->worst_consistency, - consistency); + cpi->worst_consistency = + VPXMIN(cpi->worst_consistency, consistency); cpi->total_inconsistency += this_inconsistency; } } diff --git a/vp9/encoder/vp9_ethread.c b/vp9/encoder/vp9_ethread.c index 00025b7..adb3fd8 100644 --- a/vp9/encoder/vp9_ethread.c +++ b/vp9/encoder/vp9_ethread.c @@ -67,7 +67,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { VP9_COMMON *const cm = &cpi->common; const int tile_cols = 1 << cm->log2_tile_cols; const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); - const int num_workers = MIN(cpi->oxcf.max_threads, tile_cols); + const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols); int i; vp9_init_tile_data(cpi); @@ -80,7 +80,7 @@ void vp9_encode_tiles_mt(VP9_COMP *cpi) { // resolution. if (cpi->use_svc) { int max_tile_cols = get_max_tile_cols(cpi); - allocated_workers = MIN(cpi->oxcf.max_threads, max_tile_cols); + allocated_workers = VPXMIN(cpi->oxcf.max_threads, max_tile_cols); } CHECK_MEM_ERROR(cm, cpi->workers, diff --git a/vp9/encoder/vp9_extend.c b/vp9/encoder/vp9_extend.c index 0c304dc..4a47f87 100644 --- a/vp9/encoder/vp9_extend.c +++ b/vp9/encoder/vp9_extend.c @@ -111,10 +111,12 @@ void vp9_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src, // Motion estimation may use src block variance with the block size up // to 64x64, so the right and bottom need to be extended to 64 multiple // or up to 16, whichever is greater. 
- const int er_y = MAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) - - src->y_crop_width; - const int eb_y = MAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) - - src->y_crop_height; + const int er_y = + VPXMAX(src->y_width + 16, ALIGN_POWER_OF_TWO(src->y_width, 6)) - + src->y_crop_width; + const int eb_y = + VPXMAX(src->y_height + 16, ALIGN_POWER_OF_TWO(src->y_height, 6)) - + src->y_crop_height; const int uv_width_subsampling = (src->uv_width != src->y_width); const int uv_height_subsampling = (src->uv_height != src->y_height); const int et_uv = et_y >> uv_height_subsampling; diff --git a/vp9/encoder/vp9_firstpass.c b/vp9/encoder/vp9_firstpass.c index e0c5966..65bf4b4 100644 --- a/vp9/encoder/vp9_firstpass.c +++ b/vp9/encoder/vp9_firstpass.c @@ -381,7 +381,7 @@ static unsigned int highbd_get_prediction_error(BLOCK_SIZE bsize, // for first pass test. static int get_search_range(const VP9_COMP *cpi) { int sr = 0; - const int dim = MIN(cpi->initial_width, cpi->initial_height); + const int dim = VPXMIN(cpi->initial_width, cpi->initial_height); while ((dim << sr) < MAX_FULL_PEL_VAL) ++sr; @@ -1024,7 +1024,7 @@ void vp9_first_pass(VP9_COMP *cpi, const struct lookahead_entry *source) { // Exclude any image dead zone if (image_data_start_row > 0) { intra_skip_count = - MAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2)); + VPXMAX(0, intra_skip_count - (image_data_start_row * cm->mb_cols * 2)); } { @@ -1161,7 +1161,7 @@ static double calc_correction_factor(double err_per_mb, // Adjustment based on actual quantizer to power term. const double power_term = - MIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); + VPXMIN(vp9_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high); // Calculate correction factor. if (power_term < 1.0) @@ -1190,7 +1190,7 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi, } else { const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs : cpi->common.MBs; - const int active_mbs = MAX(1, num_mbs - (int)(num_mbs * inactive_zone)); + const int active_mbs = VPXMAX(1, num_mbs - (int)(num_mbs * inactive_zone)); const double av_err_per_mb = section_err / active_mbs; const double speed_term = 1.0 + 0.04 * oxcf->speed; const double ediv_size_correction = (double)num_mbs / EDIV_SIZE_FACTOR; @@ -1223,7 +1223,7 @@ static int get_twopass_worst_quality(const VP9_COMP *cpi, // Restriction on active max q for constrained quality mode. 
if (cpi->oxcf.rc_mode == VPX_CQ) - q = MAX(q, oxcf->cq_level); + q = VPXMAX(q, oxcf->cq_level); return q; } } @@ -1233,7 +1233,7 @@ static void setup_rf_level_maxq(VP9_COMP *cpi) { RATE_CONTROL *const rc = &cpi->rc; for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) { int qdelta = vp9_frame_type_qdelta(cpi, i, rc->worst_quality); - rc->rf_level_maxq[i] = MAX(rc->worst_quality + qdelta, rc->best_quality); + rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality); } } @@ -1364,12 +1364,12 @@ static double get_sr_decay_rate(const VP9_COMP *cpi, if ((sr_diff > LOW_SR_DIFF_TRHESH)) { - sr_diff = MIN(sr_diff, SR_DIFF_MAX); + sr_diff = VPXMIN(sr_diff, SR_DIFF_MAX); sr_decay = 1.0 - (SR_DIFF_PART * sr_diff) - (MOTION_AMP_PART * motion_amplitude_factor) - (INTRA_PART * modified_pcnt_intra); } - return MAX(sr_decay, MIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); + return VPXMAX(sr_decay, VPXMIN(DEFAULT_DECAY_LIMIT, modified_pct_inter)); } // This function gives an estimate of how badly we believe the prediction @@ -1379,7 +1379,7 @@ static double get_zero_motion_factor(const VP9_COMP *cpi, const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion; double sr_decay = get_sr_decay_rate(cpi, frame); - return MIN(sr_decay, zero_motion_pct); + return VPXMIN(sr_decay, zero_motion_pct); } #define ZM_POWER_FACTOR 0.75 @@ -1391,8 +1391,8 @@ static double get_prediction_decay_rate(const VP9_COMP *cpi, (0.95 * pow((next_frame->pcnt_inter - next_frame->pcnt_motion), ZM_POWER_FACTOR)); - return MAX(zero_motion_factor, - (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); + return VPXMAX(zero_motion_factor, + (sr_decay_rate + ((1.0 - sr_decay_rate) * zero_motion_factor))); } // Function to test for a condition where a complex transition is followed @@ -1483,12 +1483,12 @@ static double calc_frame_boost(VP9_COMP *cpi, const double lq = vp9_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth); - const double boost_q_correction = MIN((0.5 + (lq * 0.015)), 1.5); + const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5); int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs : cpi->common.MBs; // Correct for any inactive region in the image - num_mbs = (int)MAX(1, num_mbs * calculate_active_area(cpi, this_frame)); + num_mbs = (int)VPXMAX(1, num_mbs * calculate_active_area(cpi, this_frame)); // Underlying boost factor is based on inter error ratio. frame_boost = (BASELINE_ERR_PER_MB * num_mbs) / @@ -1504,7 +1504,7 @@ static double calc_frame_boost(VP9_COMP *cpi, else frame_boost += frame_boost * (this_frame_mv_in_out / 2.0); - return MIN(frame_boost, max_boost * boost_q_correction); + return VPXMIN(frame_boost, max_boost * boost_q_correction); } static int calc_arf_boost(VP9_COMP *cpi, int offset, @@ -1593,7 +1593,7 @@ static int calc_arf_boost(VP9_COMP *cpi, int offset, arf_boost = (*f_boost + *b_boost); if (arf_boost < ((b_frames + f_frames) * 20)) arf_boost = ((b_frames + f_frames) * 20); - arf_boost = MAX(arf_boost, MIN_ARF_GF_BOOST); + arf_boost = VPXMAX(arf_boost, MIN_ARF_GF_BOOST); return arf_boost; } @@ -1664,7 +1664,8 @@ static int calculate_boost_bits(int frame_count, } // Calculate the number of extra bits for use in the boosted frame or frames. - return MAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks), 0); + return VPXMAX((int)(((int64_t)boost * total_group_bits) / allocation_chunks), + 0); } // Current limit on maximum number of active arfs in a GF/ARF group. 
@@ -1803,7 +1804,7 @@ static void allocate_gf_group_bits(VP9_COMP *cpi, int64_t gf_group_bits, gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[arf_idx]; target_frame_size = clamp(target_frame_size, 0, - MIN(max_bits, (int)total_group_bits)); + VPXMIN(max_bits, (int)total_group_bits)); gf_group->update_type[frame_index] = LF_UPDATE; gf_group->rf_level[frame_index] = INTER_NORMAL; @@ -1924,7 +1925,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { int int_lbq = (int)(vp9_convert_qindex_to_q(rc->last_boosted_qindex, cpi->common.bit_depth)); - active_min_gf_interval = rc->min_gf_interval + MIN(2, int_max_q / 200); + active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200); if (active_min_gf_interval > rc->max_gf_interval) active_min_gf_interval = rc->max_gf_interval; @@ -1935,7 +1936,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // bits to spare and are better with a smaller interval and smaller boost. // At high Q when there are few bits to spare we are better with a longer // interval to spread the cost of the GF. - active_max_gf_interval = 12 + MIN(4, (int_lbq / 6)); + active_max_gf_interval = 12 + VPXMIN(4, (int_lbq / 6)); if (active_max_gf_interval < active_min_gf_interval) active_max_gf_interval = active_min_gf_interval; @@ -1980,8 +1981,8 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { decay_accumulator = decay_accumulator * loop_decay_rate; // Monitor for static sections. - zero_motion_accumulator = - MIN(zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); + zero_motion_accumulator = VPXMIN( + zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); // Break clause to detect very still sections after motion. For example, // a static image after a fade or other transition. @@ -2037,7 +2038,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { (cpi->multi_arf_allowed && (rc->baseline_gf_interval >= 6) && (zero_motion_accumulator < 0.995)) ? 1 : 0; } else { - rc->gfu_boost = MAX((int)boost_score, MIN_ARF_GF_BOOST); + rc->gfu_boost = VPXMAX((int)boost_score, MIN_ARF_GF_BOOST); rc->source_alt_ref_pending = 0; } @@ -2092,11 +2093,11 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // rc factor is a weight factor that corrects for local rate control drift. double rc_factor = 1.0; if (rc->rate_error_estimate > 0) { - rc_factor = MAX(RC_FACTOR_MIN, - (double)(100 - rc->rate_error_estimate) / 100.0); + rc_factor = VPXMAX(RC_FACTOR_MIN, + (double)(100 - rc->rate_error_estimate) / 100.0); } else { - rc_factor = MIN(RC_FACTOR_MAX, - (double)(100 - rc->rate_error_estimate) / 100.0); + rc_factor = VPXMIN(RC_FACTOR_MAX, + (double)(100 - rc->rate_error_estimate) / 100.0); } tmp_q = get_twopass_worst_quality(cpi, group_av_err, @@ -2104,7 +2105,7 @@ static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { vbr_group_bits_per_frame, twopass->kfgroup_inter_fraction * rc_factor); twopass->active_worst_quality = - MAX(tmp_q, twopass->active_worst_quality >> 1); + VPXMAX(tmp_q, twopass->active_worst_quality >> 1); } #endif @@ -2421,7 +2422,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { } else { twopass->kf_group_bits = 0; } - twopass->kf_group_bits = MAX(0, twopass->kf_group_bits); + twopass->kf_group_bits = VPXMAX(0, twopass->kf_group_bits); // Reset the first pass file position. 
reset_fpf_position(twopass, start_position); @@ -2435,9 +2436,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { break; // Monitor for static sections. - zero_motion_accumulator = - MIN(zero_motion_accumulator, - get_zero_motion_factor(cpi, &next_frame)); + zero_motion_accumulator = VPXMIN( + zero_motion_accumulator, get_zero_motion_factor(cpi, &next_frame)); // Not all frames in the group are necessarily used in calculating boost. if ((i <= rc->max_gf_interval) || @@ -2450,7 +2450,7 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { const double loop_decay_rate = get_prediction_decay_rate(cpi, &next_frame); decay_accumulator *= loop_decay_rate; - decay_accumulator = MAX(decay_accumulator, MIN_DECAY_FACTOR); + decay_accumulator = VPXMAX(decay_accumulator, MIN_DECAY_FACTOR); av_decay_accumulator += decay_accumulator; ++loop_decay_counter; } @@ -2471,8 +2471,8 @@ static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) { // Apply various clamps for min and max boost rc->kf_boost = (int)(av_decay_accumulator * boost_score); - rc->kf_boost = MAX(rc->kf_boost, (rc->frames_to_key * 3)); - rc->kf_boost = MAX(rc->kf_boost, MIN_KF_BOOST); + rc->kf_boost = VPXMAX(rc->kf_boost, (rc->frames_to_key * 3)); + rc->kf_boost = VPXMAX(rc->kf_boost, MIN_KF_BOOST); // Work out how many bits to allocate for the key frame itself. kf_bits = calculate_boost_bits((rc->frames_to_key - 1), @@ -2770,7 +2770,7 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { // is designed to prevent extreme behaviour at the end of a clip // or group of frames. rc->vbr_bits_off_target += rc->base_frame_target - rc->projected_frame_size; - twopass->bits_left = MAX(twopass->bits_left - bits_used, 0); + twopass->bits_left = VPXMAX(twopass->bits_left - bits_used, 0); // Calculate the pct rc error. if (rc->total_actual_bits) { @@ -2786,7 +2786,7 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { twopass->kf_group_bits -= bits_used; twopass->last_kfgroup_zeromotion_pct = twopass->kf_zeromotion_pct; } - twopass->kf_group_bits = MAX(twopass->kf_group_bits, 0); + twopass->kf_group_bits = VPXMAX(twopass->kf_group_bits, 0); // Increment the gf group index ready for the next frame. ++twopass->gf_group.index; @@ -2836,18 +2836,18 @@ void vp9_twopass_postencode_update(VP9_COMP *cpi) { rc->vbr_bits_off_target_fast += fast_extra_thresh - rc->projected_frame_size; rc->vbr_bits_off_target_fast = - MIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); + VPXMIN(rc->vbr_bits_off_target_fast, (4 * rc->avg_frame_bandwidth)); // Fast adaptation of minQ if necessary to use up the extra bits. 
if (rc->avg_frame_bandwidth) { twopass->extend_minq_fast = (int)(rc->vbr_bits_off_target_fast * 8 / rc->avg_frame_bandwidth); } - twopass->extend_minq_fast = MIN(twopass->extend_minq_fast, - minq_adj_limit - twopass->extend_minq); + twopass->extend_minq_fast = VPXMIN( + twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); } else if (rc->vbr_bits_off_target_fast) { - twopass->extend_minq_fast = MIN(twopass->extend_minq_fast, - minq_adj_limit - twopass->extend_minq); + twopass->extend_minq_fast = VPXMIN( + twopass->extend_minq_fast, minq_adj_limit - twopass->extend_minq); } else { twopass->extend_minq_fast = 0; } diff --git a/vp9/encoder/vp9_mbgraph.c b/vp9/encoder/vp9_mbgraph.c index d59f315..1784666 100644 --- a/vp9/encoder/vp9_mbgraph.c +++ b/vp9/encoder/vp9_mbgraph.c @@ -41,7 +41,7 @@ static unsigned int do_16x16_motion_iteration(VP9_COMP *cpi, // Further step/diamond searches as necessary int step_param = mv_sf->reduce_first_step_size; - step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2); + step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); vp9_set_mv_search_range(x, ref_mv); diff --git a/vp9/encoder/vp9_mcomp.c b/vp9/encoder/vp9_mcomp.c index aa3e51c..6a02481 100644 --- a/vp9/encoder/vp9_mcomp.c +++ b/vp9/encoder/vp9_mcomp.c @@ -37,10 +37,10 @@ void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) { int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL; int row_max = (mv->row >> 3) + MAX_FULL_PEL_VAL; - col_min = MAX(col_min, (MV_LOW >> 3) + 1); - row_min = MAX(row_min, (MV_LOW >> 3) + 1); - col_max = MIN(col_max, (MV_UPP >> 3) - 1); - row_max = MIN(row_max, (MV_UPP >> 3) - 1); + col_min = VPXMAX(col_min, (MV_LOW >> 3) + 1); + row_min = VPXMAX(row_min, (MV_LOW >> 3) + 1); + col_max = VPXMIN(col_max, (MV_UPP >> 3) - 1); + row_max = VPXMIN(row_max, (MV_UPP >> 3) - 1); // Get intersection of UMV window and valid MV window to reduce # of checks // in diamond search. @@ -57,12 +57,12 @@ void vp9_set_mv_search_range(MACROBLOCK *x, const MV *mv) { int vp9_init_search_range(int size) { int sr = 0; // Minimum search size no matter what the passed in value. 
- size = MAX(16, size); + size = VPXMAX(16, size); while ((size << sr) < MAX_FULL_PEL_VAL) sr++; - sr = MIN(sr, MAX_MVSEARCH_STEPS - 2); + sr = VPXMIN(sr, MAX_MVSEARCH_STEPS - 2); return sr; } @@ -297,10 +297,10 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) { int br = bestmv->row * 8; \ int bc = bestmv->col * 8; \ int hstep = 4; \ - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ + const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); \ + const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); \ + const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); \ + const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); \ int tr = br; \ int tc = bc; \ \ @@ -668,10 +668,10 @@ int vp9_find_best_sub_pixel_tree(const MACROBLOCK *x, int bc = bestmv->col * 8; int hstep = 4; int iter, round = 3 - forced_stop; - const int minc = MAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); - const int maxc = MIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); - const int minr = MAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); - const int maxr = MIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); + const int minc = VPXMAX(x->mv_col_min * 8, ref_mv->col - MV_MAX); + const int maxc = VPXMIN(x->mv_col_max * 8, ref_mv->col + MV_MAX); + const int minr = VPXMAX(x->mv_row_min * 8, ref_mv->row - MV_MAX); + const int maxr = VPXMIN(x->mv_row_max * 8, ref_mv->row + MV_MAX); int tr = br; int tc = bc; const MV *search_step = search_step_table; @@ -1500,7 +1500,7 @@ int vp9_fast_hex_search(const MACROBLOCK *x, int use_mvcost, const MV *center_mv, MV *best_mv) { - return vp9_hex_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), + return vp9_hex_search(x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); } @@ -1515,9 +1515,9 @@ int vp9_fast_dia_search(const MACROBLOCK *x, int use_mvcost, const MV *center_mv, MV *best_mv) { - return vp9_bigdia_search(x, ref_mv, MAX(MAX_MVSEARCH_STEPS - 2, search_param), - sad_per_bit, do_init_search, cost_list, vfp, - use_mvcost, center_mv, best_mv); + return vp9_bigdia_search( + x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit, + do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv); } #undef CHECK_BETTER @@ -1547,10 +1547,10 @@ int vp9_full_range_search_c(const MACROBLOCK *x, best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + mvsad_err_cost(x, ref_mv, &fcenter_mv, sad_per_bit); - start_row = MAX(-range, x->mv_row_min - ref_mv->row); - start_col = MAX(-range, x->mv_col_min - ref_mv->col); - end_row = MIN(range, x->mv_row_max - ref_mv->row); - end_col = MIN(range, x->mv_col_max - ref_mv->col); + start_row = VPXMAX(-range, x->mv_row_min - ref_mv->row); + start_col = VPXMAX(-range, x->mv_col_min - ref_mv->col); + end_row = VPXMIN(range, x->mv_row_max - ref_mv->row); + end_col = VPXMIN(range, x->mv_col_max - ref_mv->col); for (r = start_row; r <= end_row; ++r) { for (c = start_col; c <= end_col; c += 4) { @@ -2021,10 +2021,10 @@ int vp9_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = 
MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + @@ -2054,10 +2054,10 @@ int vp9_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + @@ -2119,10 +2119,10 @@ int vp9_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv, const MACROBLOCKD *const xd = &x->e_mbd; const struct buf_2d *const what = &x->plane[0].src; const struct buf_2d *const in_what = &xd->plane[0].pre[0]; - const int row_min = MAX(ref_mv->row - distance, x->mv_row_min); - const int row_max = MIN(ref_mv->row + distance, x->mv_row_max); - const int col_min = MAX(ref_mv->col - distance, x->mv_col_min); - const int col_max = MIN(ref_mv->col + distance, x->mv_col_max); + const int row_min = VPXMAX(ref_mv->row - distance, x->mv_row_min); + const int row_max = VPXMIN(ref_mv->row + distance, x->mv_row_max); + const int col_min = VPXMAX(ref_mv->col - distance, x->mv_col_min); + const int col_max = VPXMIN(ref_mv->col + distance, x->mv_col_max); const MV fcenter_mv = {center_mv->row >> 3, center_mv->col >> 3}; unsigned int best_sad = fn_ptr->sdf(what->buf, what->stride, get_buf_from_mv(in_what, ref_mv), in_what->stride) + diff --git a/vp9/encoder/vp9_picklpf.c b/vp9/encoder/vp9_picklpf.c index 8e19103..1b1068e 100644 --- a/vp9/encoder/vp9_picklpf.c +++ b/vp9/encoder/vp9_picklpf.c @@ -92,8 +92,8 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP9_COMP *cpi, ss_err[filt_mid] = best_err; while (filter_step > 0) { - const int filt_high = MIN(filt_mid + filter_step, max_filter_level); - const int filt_low = MAX(filt_mid - filter_step, min_filter_level); + const int filt_high = VPXMIN(filt_mid + filter_step, max_filter_level); + const int filt_low = VPXMAX(filt_mid - filter_step, min_filter_level); // Bias against raising loop filter in favor of lowering it. 
int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step; diff --git a/vp9/encoder/vp9_pickmode.c b/vp9/encoder/vp9_pickmode.c index cc018fc..a065f59 100644 --- a/vp9/encoder/vp9_pickmode.c +++ b/vp9/encoder/vp9_pickmode.c @@ -293,8 +293,8 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, if (cpi->common.tx_mode == TX_MODE_SELECT) { if (sse > (var << 2)) - tx_size = MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + tx_size = VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); else tx_size = TX_8X8; @@ -304,8 +304,8 @@ static void model_rd_for_sb_y_large(VP9_COMP *cpi, BLOCK_SIZE bsize, else if (tx_size > TX_16X16) tx_size = TX_16X16; } else { - tx_size = MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + tx_size = VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); } assert(tx_size >= TX_8X8); @@ -475,8 +475,8 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, if (cpi->common.tx_mode == TX_MODE_SELECT) { if (sse > (var << 2)) xd->mi[0]->mbmi.tx_size = - MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); else xd->mi[0]->mbmi.tx_size = TX_8X8; @@ -487,8 +487,8 @@ static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize, xd->mi[0]->mbmi.tx_size = TX_16X16; } else { xd->mi[0]->mbmi.tx_size = - MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); } // Evaluate if the partition block is a skippable block in Y plane. @@ -791,7 +791,7 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x, const unsigned int max_thresh = 36000; // The encode_breakout input const unsigned int min_thresh = - MIN(((unsigned int)x->encode_breakout << 4), max_thresh); + VPXMIN(((unsigned int)x->encode_breakout << 4), max_thresh); #if CONFIG_VP9_HIGHBITDEPTH const int shift = (xd->bd << 1) - 16; #endif @@ -911,7 +911,7 @@ static void estimate_block_intra(int plane, int block, BLOCK_SIZE plane_bsize, // TODO(jingning): This needs further refactoring. 
block_yrd(cpi, x, &rate, &dist, &is_skippable, &this_sse, 0, - bsize_tx, MIN(tx_size, TX_16X16)); + bsize_tx, VPXMIN(tx_size, TX_16X16)); x->skip_txfm[0] = is_skippable; rate += vp9_cost_bit(vp9_get_skip_prob(&cpi->common, xd), is_skippable); @@ -961,8 +961,8 @@ static INLINE void update_thresh_freq_fact(VP9_COMP *cpi, if (thr_mode_idx == best_mode_idx) *freq_fact -= (*freq_fact >> 4); else - *freq_fact = MIN(*freq_fact + RD_THRESH_INC, - cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); + *freq_fact = VPXMIN(*freq_fact + RD_THRESH_INC, + cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT); } void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, @@ -973,8 +973,8 @@ void vp9_pick_intra_mode(VP9_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost, PREDICTION_MODE this_mode; struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 }; const TX_SIZE intra_tx_size = - MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); MODE_INFO *const mic = xd->mi[0]; int *bmode_costs; const MODE_INFO *above_mi = xd->mi[-xd->mi_stride]; @@ -1160,8 +1160,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, mbmi->sb_type = bsize; mbmi->ref_frame[0] = NONE; mbmi->ref_frame[1] = NONE; - mbmi->tx_size = MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cm->tx_mode]); + mbmi->tx_size = VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cm->tx_mode]); #if CONFIG_VP9_TEMPORAL_DENOISING vp9_denoiser_reset_frame_stats(ctx); @@ -1414,7 +1414,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, if (!this_early_term) { this_sse = (int64_t)sse_y; block_yrd(cpi, x, &this_rdc.rate, &this_rdc.dist, &is_skippable, - &this_sse, 0, bsize, MIN(mbmi->tx_size, TX_16X16)); + &this_sse, 0, bsize, VPXMIN(mbmi->tx_size, TX_16X16)); x->skip_txfm[0] = is_skippable; if (is_skippable) { this_rdc.rate = vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1); @@ -1523,8 +1523,8 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, bsize <= cpi->sf.max_intra_bsize)) { struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 }; const TX_SIZE intra_tx_size = - MIN(max_txsize_lookup[bsize], - tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); + VPXMIN(max_txsize_lookup[bsize], + tx_mode_to_biggest_tx_size[cpi->common.tx_mode]); int i; TX_SIZE best_intra_tx_size = TX_SIZES; diff --git a/vp9/encoder/vp9_ratectrl.c b/vp9/encoder/vp9_ratectrl.c index 4ba3406..fdc55f7 100644 --- a/vp9/encoder/vp9_ratectrl.c +++ b/vp9/encoder/vp9_ratectrl.c @@ -106,8 +106,8 @@ static int kf_low = 400; static int get_minq_index(double maxq, double x3, double x2, double x1, vpx_bit_depth_t bit_depth) { int i; - const double minqtarget = MIN(((x3 * maxq + x2) * maxq + x1) * maxq, - maxq); + const double minqtarget = VPXMIN(((x3 * maxq + x2) * maxq + x1) * maxq, + maxq); // Special case handling to deal with the step from q2.0 // down to lossless mode represented by q 1.0. 
@@ -192,15 +192,15 @@ int vp9_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs, vpx_bit_depth_t bit_depth) { const int bpm = (int)(vp9_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth)); - return MAX(FRAME_OVERHEAD_BITS, - (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); + return VPXMAX(FRAME_OVERHEAD_BITS, + (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS); } int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) { const RATE_CONTROL *rc = &cpi->rc; const VP9EncoderConfig *oxcf = &cpi->oxcf; - const int min_frame_target = MAX(rc->min_frame_bandwidth, - rc->avg_frame_bandwidth >> 5); + const int min_frame_target = VPXMAX(rc->min_frame_bandwidth, + rc->avg_frame_bandwidth >> 5); if (target < min_frame_target) target = min_frame_target; if (cpi->refresh_golden_frame && rc->is_src_frame_alt_ref) { @@ -216,7 +216,7 @@ int vp9_rc_clamp_pframe_target_size(const VP9_COMP *const cpi, int target) { if (oxcf->rc_max_inter_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } return target; } @@ -227,7 +227,7 @@ int vp9_rc_clamp_iframe_target_size(const VP9_COMP *const cpi, int target) { if (oxcf->rc_max_intra_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } if (target > rc->max_frame_bandwidth) target = rc->max_frame_bandwidth; @@ -250,7 +250,8 @@ static void update_layer_buffer_level(SVC *svc, int encoded_frame_size) { lrc->bits_off_target += bits_off_for_this_layer; // Clip buffer level to maximum buffer size for the layer. - lrc->bits_off_target = MIN(lrc->bits_off_target, lrc->maximum_buffer_size); + lrc->bits_off_target = + VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size); lrc->buffer_level = lrc->bits_off_target; } } @@ -268,7 +269,7 @@ static void update_buffer_level(VP9_COMP *cpi, int encoded_frame_size) { } // Clip the buffer level to the maximum specified buffer size. - rc->bits_off_target = MIN(rc->bits_off_target, rc->maximum_buffer_size); + rc->bits_off_target = VPXMIN(rc->bits_off_target, rc->maximum_buffer_size); rc->buffer_level = rc->bits_off_target; if (is_one_pass_cbr_svc(cpi)) { @@ -287,8 +288,8 @@ int vp9_rc_get_default_min_gf_interval( if (factor <= factor_safe) return default_interval; else - return MAX(default_interval, - (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5)); + return VPXMAX(default_interval, + (int)(MIN_GF_INTERVAL * factor / factor_safe + 0.5)); // Note this logic makes: // 4K24: 5 // 4K30: 6 @@ -296,9 +297,9 @@ int vp9_rc_get_default_min_gf_interval( } int vp9_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) { - int interval = MIN(MAX_GF_INTERVAL, (int)(framerate * 0.75)); + int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75)); interval += (interval & 0x01); // Round to even value - return MAX(interval, min_gf_interval); + return VPXMAX(interval, min_gf_interval); } void vp9_rc_init(const VP9EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) { @@ -478,7 +479,7 @@ void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) { // More heavily damped adjustment used if we have been oscillating either side // of target. 
adjustment_limit = 0.25 + - 0.5 * MIN(1, fabs(log10(0.01 * correction_factor))); + 0.5 * VPXMIN(1, fabs(log10(0.01 * correction_factor))); cpi->rc.q_2_frame = cpi->rc.q_1_frame; cpi->rc.q_1_frame = cm->base_qindex; @@ -558,8 +559,8 @@ int vp9_rc_regulate_q(const VP9_COMP *cpi, int target_bits_per_frame, if (cpi->oxcf.rc_mode == VPX_CBR && (cpi->rc.rc_1_frame * cpi->rc.rc_2_frame == -1) && cpi->rc.q_1_frame != cpi->rc.q_2_frame) { - q = clamp(q, MIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame), - MAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame)); + q = clamp(q, VPXMIN(cpi->rc.q_1_frame, cpi->rc.q_2_frame), + VPXMAX(cpi->rc.q_1_frame, cpi->rc.q_2_frame)); } return q; } @@ -617,7 +618,7 @@ static int calc_active_worst_quality_one_pass_vbr(const VP9_COMP *cpi) { : rc->last_q[INTER_FRAME] * 2; } } - return MIN(active_worst_quality, rc->worst_quality); + return VPXMIN(active_worst_quality, rc->worst_quality); } // Adjust active_worst_quality level based on buffer level. @@ -644,10 +645,10 @@ static int calc_active_worst_quality_one_pass_cbr(const VP9_COMP *cpi) { // So for first few frames following key, the qp of that key frame is weighted // into the active_worst_quality setting. ambient_qp = (cm->current_video_frame < num_frames_weight_key) ? - MIN(rc->avg_frame_qindex[INTER_FRAME], rc->avg_frame_qindex[KEY_FRAME]) : - rc->avg_frame_qindex[INTER_FRAME]; - active_worst_quality = MIN(rc->worst_quality, - ambient_qp * 5 / 4); + VPXMIN(rc->avg_frame_qindex[INTER_FRAME], + rc->avg_frame_qindex[KEY_FRAME]) : + rc->avg_frame_qindex[INTER_FRAME]; + active_worst_quality = VPXMIN(rc->worst_quality, ambient_qp * 5 / 4); if (rc->buffer_level > rc->optimal_buffer_level) { // Adjust down. // Maximum limit for down adjustment, ~30%. @@ -700,7 +701,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP9_COMP *cpi, int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else if (cm->current_video_frame > 0) { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; @@ -833,7 +834,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP9_COMP *cpi, int delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality); } else { // not first frame of one pass and kf_boost is set double q_adj_factor = 1.0; @@ -1002,21 +1003,21 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int qindex; if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - qindex = MIN(rc->last_kf_qindex, rc->last_boosted_qindex); + qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); active_best_quality = qindex; last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth); - active_worst_quality = MIN(qindex + delta_qindex, active_worst_quality); - + active_worst_quality = + VPXMIN(qindex + delta_qindex, active_worst_quality); } else { qindex = rc->last_boosted_qindex; last_boosted_q = vp9_convert_qindex_to_q(qindex, cm->bit_depth); delta_qindex = vp9_compute_qdelta(rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth); - active_best_quality = MAX(qindex + delta_qindex, rc->best_quality); + active_best_quality = VPXMAX(qindex + 
delta_qindex, rc->best_quality); } } else { // Not forced keyframe. @@ -1116,8 +1117,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) { int qdelta = vp9_frame_type_qdelta(cpi, gf_group->rf_level[gf_group->index], active_worst_quality); - active_worst_quality = MAX(active_worst_quality + qdelta, - active_best_quality); + active_worst_quality = VPXMAX(active_worst_quality + qdelta, + active_best_quality); } #endif @@ -1126,7 +1127,8 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, int qdelta = vp9_compute_qdelta_by_rate(rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth); - active_best_quality = MAX(active_best_quality + qdelta, rc->best_quality); + active_best_quality = + VPXMAX(active_best_quality + qdelta, rc->best_quality); } active_best_quality = clamp(active_best_quality, @@ -1141,7 +1143,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP9_COMP *cpi, rc->this_key_frame_forced) { // If static since last kf use better of last boosted and last kf q. if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) { - q = MIN(rc->last_kf_qindex, rc->last_boosted_qindex); + q = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex); } else { q = rc->last_boosted_qindex; } @@ -1203,9 +1205,9 @@ void vp9_rc_compute_frame_size_bounds(const VP9_COMP *cpi, // For very small rate targets where the fractional adjustment // may be tiny make sure there is at least a minimum range. const int tolerance = (cpi->sf.recode_tolerance * frame_target) / 100; - *frame_under_shoot_limit = MAX(frame_target - tolerance - 200, 0); - *frame_over_shoot_limit = MIN(frame_target + tolerance + 200, - cpi->rc.max_frame_bandwidth); + *frame_under_shoot_limit = VPXMAX(frame_target - tolerance - 200, 0); + *frame_over_shoot_limit = VPXMIN(frame_target + tolerance + 200, + cpi->rc.max_frame_bandwidth); } } @@ -1458,7 +1460,8 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { const SVC *const svc = &cpi->svc; const int64_t diff = rc->optimal_buffer_level - rc->buffer_level; const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100; - int min_frame_target = MAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS); + int min_frame_target = + VPXMAX(rc->avg_frame_bandwidth >> 4, FRAME_OVERHEAD_BITS); int target; if (oxcf->gf_cbr_boost_pct) { @@ -1480,23 +1483,24 @@ static int calc_pframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { svc->temporal_layer_id, svc->number_temporal_layers); const LAYER_CONTEXT *lc = &svc->layer_context[layer]; target = lc->avg_frame_size; - min_frame_target = MAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); + min_frame_target = VPXMAX(lc->avg_frame_size >> 4, FRAME_OVERHEAD_BITS); } if (diff > 0) { // Lower the target bandwidth for this frame. - const int pct_low = (int)MIN(diff / one_pct_bits, oxcf->under_shoot_pct); + const int pct_low = (int)VPXMIN(diff / one_pct_bits, oxcf->under_shoot_pct); target -= (target * pct_low) / 200; } else if (diff < 0) { // Increase the target bandwidth for this frame. 
- const int pct_high = (int)MIN(-diff / one_pct_bits, oxcf->over_shoot_pct); + const int pct_high = + (int)VPXMIN(-diff / one_pct_bits, oxcf->over_shoot_pct); target += (target * pct_high) / 200; } if (oxcf->rc_max_inter_bitrate_pct) { const int max_rate = rc->avg_frame_bandwidth * oxcf->rc_max_inter_bitrate_pct / 100; - target = MIN(target, max_rate); + target = VPXMIN(target, max_rate); } - return MAX(min_frame_target, target); + return VPXMAX(min_frame_target, target); } static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { @@ -1518,7 +1522,7 @@ static int calc_iframe_target_size_one_pass_cbr(const VP9_COMP *cpi) { const LAYER_CONTEXT *lc = &svc->layer_context[layer]; framerate = lc->framerate; } - kf_boost = MAX(kf_boost, (int)(2 * framerate - 16)); + kf_boost = VPXMAX(kf_boost, (int)(2 * framerate - 16)); if (rc->frames_since_key < framerate / 2) { kf_boost = (int)(kf_boost * rc->frames_since_key / (framerate / 2)); @@ -1726,7 +1730,7 @@ void vp9_rc_set_gf_interval_range(const VP9_COMP *const cpi, rc->max_gf_interval = rc->static_scene_max_gf_interval; // Clamp min to max - rc->min_gf_interval = MIN(rc->min_gf_interval, rc->max_gf_interval); + rc->min_gf_interval = VPXMIN(rc->min_gf_interval, rc->max_gf_interval); } void vp9_rc_update_framerate(VP9_COMP *cpi) { @@ -1739,7 +1743,8 @@ void vp9_rc_update_framerate(VP9_COMP *cpi) { rc->min_frame_bandwidth = (int)(rc->avg_frame_bandwidth * oxcf->two_pass_vbrmin_section / 100); - rc->min_frame_bandwidth = MAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); + rc->min_frame_bandwidth = + VPXMAX(rc->min_frame_bandwidth, FRAME_OVERHEAD_BITS); // A maximum bitrate for a frame is defined. // The baseline for this aligns with HW implementations that @@ -1750,8 +1755,8 @@ void vp9_rc_update_framerate(VP9_COMP *cpi) { // specifies lossless encode. vbr_max_bits = (int)(((int64_t)rc->avg_frame_bandwidth * oxcf->two_pass_vbrmax_section) / 100); - rc->max_frame_bandwidth = MAX(MAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), - vbr_max_bits); + rc->max_frame_bandwidth = + VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits); vp9_rc_set_gf_interval_range(cpi, rc); } @@ -1789,12 +1794,12 @@ static void vbr_rate_correction(VP9_COMP *cpi, int *this_frame_target) { // Dont do it for kf,arf,gf or overlay frames. if (!frame_is_kf_gf_arf(cpi) && !rc->is_src_frame_alt_ref && rc->vbr_bits_off_target_fast) { - int one_frame_bits = MAX(rc->avg_frame_bandwidth, *this_frame_target); + int one_frame_bits = VPXMAX(rc->avg_frame_bandwidth, *this_frame_target); int fast_extra_bits; - fast_extra_bits = - (int)MIN(rc->vbr_bits_off_target_fast, one_frame_bits); - fast_extra_bits = (int)MIN(fast_extra_bits, - MAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8)); + fast_extra_bits = (int)VPXMIN(rc->vbr_bits_off_target_fast, one_frame_bits); + fast_extra_bits = (int)VPXMIN( + fast_extra_bits, + VPXMAX(one_frame_bits / 8, rc->vbr_bits_off_target_fast / 8)); *this_frame_target += (int)fast_extra_bits; rc->vbr_bits_off_target_fast -= fast_extra_bits; } @@ -1948,7 +1953,7 @@ void vp9_avg_source_sad(VP9_COMP *cpi) { // between current and the previous frame value(s). Use a minimum threshold // for cases where there is small change from content that is completely // static. 
- if (avg_sad > MAX(4000, (rc->avg_source_sad << 3)) && + if (avg_sad > VPXMAX(4000, (rc->avg_source_sad << 3)) && rc->frames_since_key > 1) rc->high_source_sad = 1; else diff --git a/vp9/encoder/vp9_rd.c b/vp9/encoder/vp9_rd.c index 2f2f7c1..fdee153 100644 --- a/vp9/encoder/vp9_rd.c +++ b/vp9/encoder/vp9_rd.c @@ -172,7 +172,7 @@ int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) { if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) { const GF_GROUP *const gf_group = &cpi->twopass.gf_group; const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index]; - const int boost_index = MIN(15, (cpi->rc.gfu_boost / 100)); + const int boost_index = VPXMIN(15, (cpi->rc.gfu_boost / 100)); rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7; rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7); @@ -204,7 +204,7 @@ static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) { q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; #endif // CONFIG_VP9_HIGHBITDEPTH // TODO(debargha): Adjust the function below. - return MAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8); + return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8); } void vp9_initialize_me_consts(VP9_COMP *cpi, MACROBLOCK *x, int qindex) { @@ -404,7 +404,7 @@ void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2, static const uint32_t MAX_XSQ_Q10 = 245727; const uint64_t xsq_q10_64 = (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var; - const int xsq_q10 = (int)MIN(xsq_q10_64, MAX_XSQ_Q10); + const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10); model_rd_norm(xsq_q10, &r_q10, &d_q10); *rate = ((r_q10 << n_log2) + 2) >> 2; *dist = (var * (int64_t)d_q10 + 512) >> 10; @@ -485,7 +485,7 @@ void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x, continue; fp_row = (this_mv->row + 3 + (this_mv->row >= 0)) >> 3; fp_col = (this_mv->col + 3 + (this_mv->col >= 0)) >> 3; - max_mv = MAX(max_mv, MAX(abs(this_mv->row), abs(this_mv->col)) >> 3); + max_mv = VPXMAX(max_mv, VPXMAX(abs(this_mv->row), abs(this_mv->col)) >> 3); if (fp_row ==0 && fp_col == 0 && zero_seen) continue; @@ -629,16 +629,15 @@ void vp9_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh, const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES; int mode; for (mode = 0; mode < top_mode; ++mode) { - const BLOCK_SIZE min_size = MAX(bsize - 1, BLOCK_4X4); - const BLOCK_SIZE max_size = MIN(bsize + 2, BLOCK_64X64); + const BLOCK_SIZE min_size = VPXMAX(bsize - 1, BLOCK_4X4); + const BLOCK_SIZE max_size = VPXMIN(bsize + 2, BLOCK_64X64); BLOCK_SIZE bs; for (bs = min_size; bs <= max_size; ++bs) { int *const fact = &factor_buf[bs][mode]; if (mode == best_mode_index) { *fact -= (*fact >> 4); } else { - *fact = MIN(*fact + RD_THRESH_INC, - rd_thresh * RD_THRESH_MAX_FACT); + *fact = VPXMIN(*fact + RD_THRESH_INC, rd_thresh * RD_THRESH_MAX_FACT); } } } diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c index 96c6474..038d1e1 100644 --- a/vp9/encoder/vp9_rdopt.c +++ b/vp9/encoder/vp9_rdopt.c @@ -192,8 +192,8 @@ static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE bsize, const int64_t ac_thr = p->quant_thred[1] >> shift; // The low thresholds are used to measure if the prediction errors are // low enough so that we can skip the mode search. 
- const int64_t low_dc_thr = MIN(50, dc_thr >> 2); - const int64_t low_ac_thr = MIN(80, ac_thr >> 2); + const int64_t low_dc_thr = VPXMIN(50, dc_thr >> 2); + const int64_t low_ac_thr = VPXMIN(80, ac_thr >> 2); int bw = 1 << (b_width_log2_lookup[bs] - b_width_log2_lookup[unit_size]); int bh = 1 << (b_height_log2_lookup[bs] - b_width_log2_lookup[unit_size]); int idx, idy; @@ -505,7 +505,7 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, if (tx_size != TX_32X32) dc_correct >>= 2; - dist = MAX(0, sse - dc_correct); + dist = VPXMAX(0, sse - dc_correct); } } else { // SKIP_TXFM_AC_DC @@ -531,7 +531,7 @@ static void block_rd_txfm(int plane, int block, BLOCK_SIZE plane_bsize, rd2 = RDCOST(x->rdmult, x->rddiv, 0, sse); // TODO(jingning): temporarily enabled only for luma component - rd = MIN(rd1, rd2); + rd = VPXMIN(rd1, rd2); if (plane == 0) x->zcoeff_blk[tx_size][block] = !x->plane[plane].eobs[block] || (rd1 > rd2 && !xd->lossless); @@ -597,7 +597,7 @@ static void choose_largest_tx_size(VP9_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - mbmi->tx_size = MIN(max_tx_size, largest_tx_size); + mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size); txfm_rd_in_plane(x, rate, distortion, skip, sse, ref_best_rd, 0, bs, @@ -637,8 +637,8 @@ static void choose_tx_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x, start_tx = max_tx_size; end_tx = 0; } else { - TX_SIZE chosen_tx_size = MIN(max_tx_size, - tx_mode_to_biggest_tx_size[cm->tx_mode]); + TX_SIZE chosen_tx_size = VPXMIN(max_tx_size, + tx_mode_to_biggest_tx_size[cm->tx_mode]); start_tx = chosen_tx_size; end_tx = chosen_tx_size; } @@ -1389,7 +1389,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi, cpi->sf.use_fast_coef_costing); rd1 = RDCOST(x->rdmult, x->rddiv, thisrate, thisdistortion >> 2); rd2 = RDCOST(x->rdmult, x->rddiv, 0, thissse >> 2); - rd = MIN(rd1, rd2); + rd = VPXMIN(rd1, rd2); if (rd >= best_yrd) return INT64_MAX; } @@ -1808,7 +1808,8 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, if (i == 0) max_mv = x->max_mv_context[mbmi->ref_frame[0]]; else - max_mv = MAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; + max_mv = + VPXMAX(abs(bsi->mvp.as_mv.row), abs(bsi->mvp.as_mv.col)) >> 3; if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's @@ -1826,7 +1827,7 @@ static int64_t rd_pick_best_sub8x8_mode(VP9_COMP *cpi, MACROBLOCK *x, if (cpi->sf.adaptive_motion_search) { mvp_full.row = x->pred_mv[mbmi->ref_frame[0]].row >> 3; mvp_full.col = x->pred_mv[mbmi->ref_frame[0]].col >> 3; - step_param = MAX(step_param, 8); + step_param = VPXMAX(step_param, 8); } // adjust src pointer for this block @@ -2231,7 +2232,7 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, vp9_set_mv_search_range(x, &ref_mv); // Work out the size of the first step in the mv step search. - // 0 here is maximum length first step. 1 is MAX >> 1 etc. + // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc. 
if (cpi->sf.mv.auto_mv_step_size && cm->show_frame) { // Take wtd average of the step_params based on the last frame's // max mv magnitude and that based on the best ref mvs of the current @@ -2243,9 +2244,10 @@ static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x, } if (cpi->sf.adaptive_motion_search && bsize < BLOCK_64X64) { - int boffset = 2 * (b_width_log2_lookup[BLOCK_64X64] - - MIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize])); - step_param = MAX(step_param, boffset); + int boffset = + 2 * (b_width_log2_lookup[BLOCK_64X64] - + VPXMIN(b_height_log2_lookup[bsize], b_width_log2_lookup[bsize])); + step_param = VPXMAX(step_param, boffset); } if (cpi->sf.adaptive_motion_search) { @@ -2466,7 +2468,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // motion field, where the distortion gain for a single block may not // be enough to overcome the cost of a new mv. if (discount_newmv_test(cpi, this_mode, tmp_mv, mode_mv, refs[0])) { - *rate2 += MAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1); + *rate2 += VPXMAX((rate_mv / NEW_MV_DISCOUNT_FACTOR), 1); } else { *rate2 += rate_mv; } @@ -2502,10 +2504,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, // initiation of a motion field. if (discount_newmv_test(cpi, this_mode, frame_mv[refs[0]], mode_mv, refs[0])) { - *rate2 += MIN(cost_mv_ref(cpi, this_mode, - mbmi_ext->mode_context[refs[0]]), - cost_mv_ref(cpi, NEARESTMV, - mbmi_ext->mode_context[refs[0]])); + *rate2 += VPXMIN(cost_mv_ref(cpi, this_mode, + mbmi_ext->mode_context[refs[0]]), + cost_mv_ref(cpi, NEARESTMV, + mbmi_ext->mode_context[refs[0]])); } else { *rate2 += cost_mv_ref(cpi, this_mode, mbmi_ext->mode_context[refs[0]]); } @@ -2547,10 +2549,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, rd = RDCOST(x->rdmult, x->rddiv, tmp_rate_sum, tmp_dist_sum); filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); if (cm->interp_filter == SWITCHABLE) rd += rs_rd; - *mask_filter = MAX(*mask_filter, rd); + *mask_filter = VPXMAX(*mask_filter, rd); } else { int rate_sum = 0; int64_t dist_sum = 0; @@ -2580,10 +2582,10 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, rd = RDCOST(x->rdmult, x->rddiv, rate_sum, dist_sum); filter_cache[i] = rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], rd + rs_rd); if (cm->interp_filter == SWITCHABLE) rd += rs_rd; - *mask_filter = MAX(*mask_filter, rd); + *mask_filter = VPXMAX(*mask_filter, rd); if (i == 0 && intpel_mv) { tmp_rate_sum = rate_sum; @@ -2694,7 +2696,7 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x, *distortion += distortion_y; rdcosty = RDCOST(x->rdmult, x->rddiv, *rate2, *distortion); - rdcosty = MIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse)); + rdcosty = VPXMIN(rdcosty, RDCOST(x->rdmult, x->rddiv, 0, *psse)); if (!super_block_uvrd(cpi, x, rate_uv, &distortion_uv, &skippable_uv, &sseuv, bsize, ref_best_rd - rdcosty)) { @@ -2759,7 +2761,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x, pd[1].subsampling_x, pd[1].subsampling_y); rd_pick_intra_sbuv_mode(cpi, x, ctx, &rate_uv, &rate_uv_tokenonly, - &dist_uv, &uv_skip, MAX(BLOCK_8X8, bsize), + &dist_uv, &uv_skip, VPXMAX(BLOCK_8X8, bsize), max_uv_tx_size); if (y_skip && uv_skip) { @@ -2826,12 +2828,12 @@ static void rd_variance_adjustment(VP9_COMP *cpi, // to a predictor with a low spatial 
complexity compared to the source. if ((source_variance > LOW_VAR_THRESH) && (ref_frame == INTRA_FRAME) && (source_variance > recon_variance)) { - var_factor = MIN(absvar_diff, MIN(VLOW_ADJ_MAX, var_error)); + var_factor = VPXMIN(absvar_diff, VPXMIN(VLOW_ADJ_MAX, var_error)); // A second possible case of interest is where the source variance // is very low and we wish to discourage false texture or motion trails. } else if ((source_variance < (LOW_VAR_THRESH >> 1)) && (recon_variance > source_variance)) { - var_factor = MIN(absvar_diff, MIN(VHIGH_ADJ_MAX, var_error)); + var_factor = VPXMIN(absvar_diff, VPXMIN(VHIGH_ADJ_MAX, var_error)); } *this_rd += (*this_rd * var_factor) / 100; } @@ -2861,7 +2863,7 @@ int vp9_active_h_edge(VP9_COMP *cpi, int mi_row, int mi_step) { top_edge += (int)(twopass->this_frame_stats.inactive_zone_rows * 2); bottom_edge -= (int)(twopass->this_frame_stats.inactive_zone_rows * 2); - bottom_edge = MAX(top_edge, bottom_edge); + bottom_edge = VPXMAX(top_edge, bottom_edge); } if (((top_edge >= mi_row) && (top_edge < (mi_row + mi_step))) || @@ -2888,7 +2890,7 @@ int vp9_active_v_edge(VP9_COMP *cpi, int mi_col, int mi_step) { left_edge += (int)(twopass->this_frame_stats.inactive_zone_cols * 2); right_edge -= (int)(twopass->this_frame_stats.inactive_zone_cols * 2); - right_edge = MAX(left_edge, right_edge); + right_edge = VPXMAX(left_edge, right_edge); } if (((left_edge >= mi_col) && (left_edge < (mi_col + mi_step))) || @@ -3135,7 +3137,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, } if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))) + (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) continue; if (mode_skip_mask[ref_frame] & (1 << this_mode)) @@ -3149,10 +3151,10 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, continue; if (sf->motion_field_mode_search) { - const int mi_width = MIN(num_8x8_blocks_wide_lookup[bsize], - tile_info->mi_col_end - mi_col); - const int mi_height = MIN(num_8x8_blocks_high_lookup[bsize], - tile_info->mi_row_end - mi_row); + const int mi_width = VPXMIN(num_8x8_blocks_wide_lookup[bsize], + tile_info->mi_col_end - mi_col); + const int mi_height = VPXMIN(num_8x8_blocks_high_lookup[bsize], + tile_info->mi_row_end - mi_row); const int bsl = mi_width_log2_lookup[bsize]; int cb_partition_search_ctrl = (((mi_row + mi_col) >> bsl) + get_chessboard_index(cm->current_video_frame)) & 0x1; @@ -3370,9 +3372,9 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, if (!disable_skip && ref_frame == INTRA_FRAME) { for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); + best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); } // Did this mode help.. i.e. is it the new best mode @@ -3471,7 +3473,7 @@ void vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, adj_rd = filter_cache[i] - ref; adj_rd += this_rd; - best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); } } } @@ -3814,7 +3816,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, } if ((ref_frame_skip_mask[0] & (1 << ref_frame)) && - (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))) + (ref_frame_skip_mask[1] & (1 << VPXMAX(0, second_ref_frame)))) continue; // Test best rd so far against threshold for trying this mode. 
@@ -3969,12 +3971,11 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0); filter_cache[switchable_filter_index] = tmp_rd; filter_cache[SWITCHABLE_FILTERS] = - MIN(filter_cache[SWITCHABLE_FILTERS], - tmp_rd + rs_rd); + VPXMIN(filter_cache[SWITCHABLE_FILTERS], tmp_rd + rs_rd); if (cm->interp_filter == SWITCHABLE) tmp_rd += rs_rd; - mask_filter = MAX(mask_filter, tmp_rd); + mask_filter = VPXMAX(mask_filter, tmp_rd); newbest = (tmp_rd < tmp_best_rd); if (newbest) { @@ -4051,9 +4052,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, compmode_cost = vp9_cost_bit(comp_mode_p, comp_pred); - tmp_best_rdu = best_rd - - MIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2), - RDCOST(x->rdmult, x->rddiv, 0, total_sse)); + tmp_best_rdu = + best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2), + RDCOST(x->rdmult, x->rddiv, 0, total_sse)); if (tmp_best_rdu > 0) { // If even the 'Y' rd value of split is higher than best so far @@ -4113,9 +4114,9 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, if (!disable_skip && ref_frame == INTRA_FRAME) { for (i = 0; i < REFERENCE_MODES; ++i) - best_pred_rd[i] = MIN(best_pred_rd[i], this_rd); + best_pred_rd[i] = VPXMIN(best_pred_rd[i], this_rd); for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) - best_filter_rd[i] = MIN(best_filter_rd[i], this_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], this_rd); } // Did this mode help.. i.e. is it the new best mode @@ -4214,7 +4215,7 @@ void vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, adj_rd = filter_cache[i] - ref; adj_rd += this_rd; - best_filter_rd[i] = MIN(best_filter_rd[i], adj_rd); + best_filter_rd[i] = VPXMIN(best_filter_rd[i], adj_rd); } } diff --git a/vp9/encoder/vp9_speed_features.c b/vp9/encoder/vp9_speed_features.c index 5e72c4c..8530f98 100644 --- a/vp9/encoder/vp9_speed_features.c +++ b/vp9/encoder/vp9_speed_features.c @@ -49,7 +49,7 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, VP9_COMMON *const cm = &cpi->common; if (speed >= 1) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->partition_search_breakout_dist_thr = (1 << 23); @@ -60,7 +60,7 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 2) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; sf->adaptive_pred_interp_filter = 0; @@ -75,7 +75,7 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 3) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = DISABLE_ALL_SPLIT; sf->schedule_mode_search = cm->base_qindex < 220 ? 1 : 0; sf->partition_search_breakout_dist_thr = (1 << 25); @@ -99,7 +99,7 @@ static void set_good_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 4) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->partition_search_breakout_dist_thr = (1 << 26); } else { sf->partition_search_breakout_dist_thr = (1 << 24); @@ -215,7 +215,7 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, VP9_COMMON *const cm = &cpi->common; if (speed >= 1) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? 
DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { @@ -224,7 +224,7 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 2) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->disable_split_mask = cm->show_frame ? DISABLE_ALL_SPLIT : DISABLE_ALL_INTER_SPLIT; } else { @@ -233,7 +233,7 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 5) { - if (MIN(cm->width, cm->height) >= 720) { + if (VPXMIN(cm->width, cm->height) >= 720) { sf->partition_search_breakout_dist_thr = (1 << 25); } else { sf->partition_search_breakout_dist_thr = (1 << 23); @@ -241,7 +241,7 @@ static void set_rt_speed_feature_framesize_dependent(VP9_COMP *cpi, } if (speed >= 7) { - sf->encode_breakout_thresh = (MIN(cm->width, cm->height) >= 720) ? + sf->encode_breakout_thresh = (VPXMIN(cm->width, cm->height) >= 720) ? 800 : 300; } } diff --git a/vp9/encoder/vp9_svc_layercontext.c b/vp9/encoder/vp9_svc_layercontext.c index 7b9c4cd..b619840 100644 --- a/vp9/encoder/vp9_svc_layercontext.c +++ b/vp9/encoder/vp9_svc_layercontext.c @@ -139,8 +139,8 @@ void vp9_update_layer_context_change_config(VP9_COMP *const cpi, lrc->maximum_buffer_size = (int64_t)(rc->maximum_buffer_size * bitrate_alloc); lrc->bits_off_target = - MIN(lrc->bits_off_target, lrc->maximum_buffer_size); - lrc->buffer_level = MIN(lrc->buffer_level, lrc->maximum_buffer_size); + VPXMIN(lrc->bits_off_target, lrc->maximum_buffer_size); + lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size); lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[tl]; lrc->avg_frame_bandwidth = (int)(lc->target_bandwidth / lc->framerate); lrc->max_frame_bandwidth = rc->max_frame_bandwidth; @@ -171,9 +171,9 @@ void vp9_update_layer_context_change_config(VP9_COMP *const cpi, (int64_t)(rc->optimal_buffer_level * bitrate_alloc); lrc->maximum_buffer_size = (int64_t)(rc->maximum_buffer_size * bitrate_alloc); - lrc->bits_off_target = MIN(lrc->bits_off_target, - lrc->maximum_buffer_size); - lrc->buffer_level = MIN(lrc->buffer_level, lrc->maximum_buffer_size); + lrc->bits_off_target = VPXMIN(lrc->bits_off_target, + lrc->maximum_buffer_size); + lrc->buffer_level = VPXMIN(lrc->buffer_level, lrc->maximum_buffer_size); // Update framerate-related quantities. 
if (svc->number_temporal_layers > 1 && cpi->oxcf.rc_mode == VPX_CBR) { lc->framerate = cpi->framerate / oxcf->ts_rate_decimator[layer]; diff --git a/vp9/encoder/vp9_temporal_filter.c b/vp9/encoder/vp9_temporal_filter.c index 439eac6..9c1629e 100644 --- a/vp9/encoder/vp9_temporal_filter.c +++ b/vp9/encoder/vp9_temporal_filter.c @@ -242,7 +242,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi, xd->plane[0].pre[0].stride = stride; step_param = mv_sf->reduce_first_step_size; - step_param = MIN(step_param, MAX_MVSEARCH_STEPS - 2); + step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2); // Ignore mv costing by sending NULL pointer instead of cost arrays vp9_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1, diff --git a/vp9/vp9_dx_iface.c b/vp9/vp9_dx_iface.c index 96ede3c..8119df8 100644 --- a/vp9/vp9_dx_iface.c +++ b/vp9/vp9_dx_iface.c @@ -18,6 +18,7 @@ #include "vpx/vp8dx.h" #include "vpx/vpx_decoder.h" #include "vpx_dsp/bitreader_buffer.h" +#include "vpx_dsp/vpx_dsp_common.h" #include "vpx_util/vpx_thread.h" #include "vp9/common/vp9_alloccommon.h" @@ -183,7 +184,7 @@ static vpx_codec_err_t decoder_peek_si_internal(const uint8_t *data, si->w = si->h = 0; if (decrypt_cb) { - data_sz = MIN(sizeof(clear_buffer), data_sz); + data_sz = VPXMIN(sizeof(clear_buffer), data_sz); decrypt_cb(decrypt_state, data, clear_buffer, data_sz); data = clear_buffer; } diff --git a/vpx_dsp/bitreader.c b/vpx_dsp/bitreader.c index 4420fad..ba87022 100644 --- a/vpx_dsp/bitreader.c +++ b/vpx_dsp/bitreader.c @@ -48,7 +48,7 @@ void vpx_reader_fill(vpx_reader *r) { int shift = BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT); if (r->decrypt_cb) { - size_t n = MIN(sizeof(r->clear_buffer), bytes_left); + size_t n = VPXMIN(sizeof(r->clear_buffer), bytes_left); r->decrypt_cb(r->decrypt_state, buffer, r->clear_buffer, (int)n); buffer = r->clear_buffer; buffer_start = r->clear_buffer; diff --git a/vpx_dsp/prob.h b/vpx_dsp/prob.h index 729f90a..c3cb103 100644 --- a/vpx_dsp/prob.h +++ b/vpx_dsp/prob.h @@ -65,7 +65,7 @@ static INLINE vpx_prob merge_probs(vpx_prob pre_prob, unsigned int count_sat, unsigned int max_update_factor) { const vpx_prob prob = get_binary_prob(ct[0], ct[1]); - const unsigned int count = MIN(ct[0] + ct[1], count_sat); + const unsigned int count = VPXMIN(ct[0] + ct[1], count_sat); const unsigned int factor = max_update_factor * count / count_sat; return weighted_prob(pre_prob, prob, factor); } @@ -82,7 +82,7 @@ static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob, if (den == 0) { return pre_prob; } else { - const unsigned int count = MIN(den, MODE_MV_COUNT_SAT); + const unsigned int count = VPXMIN(den, MODE_MV_COUNT_SAT); const unsigned int factor = count_to_update_factor[count]; const vpx_prob prob = clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den); diff --git a/vpx_dsp/vpx_dsp_common.h b/vpx_dsp/vpx_dsp_common.h index ccb8189..a83339e 100644 --- a/vpx_dsp/vpx_dsp_common.h +++ b/vpx_dsp/vpx_dsp_common.h @@ -19,8 +19,8 @@ extern "C" { #endif -#define MIN(x, y) (((x) < (y)) ? (x) : (y)) -#define MAX(x, y) (((x) > (y)) ? (x) : (y)) +#define VPXMIN(x, y) (((x) < (y)) ? (x) : (y)) +#define VPXMAX(x, y) (((x) > (y)) ? (x) : (y)) #if CONFIG_VP9_HIGHBITDEPTH // Note: -- 2.7.4
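The hunks above all apply the same substitution: call sites that used the bare MIN/MAX macros now use the prefixed VPXMIN/VPXMAX, whose definitions appear in the final vpx_dsp/vpx_dsp_common.h hunk, with lines reflowed where the longer names affect formatting. The short C program below is not part of the patch; it is a standalone sketch that copies those two macro definitions and shows a representative clamping call site in the style of the rate-control hunks. The clamp_target() helper and the sample numbers are illustrative inventions, not code from libvpx.

#include <stdio.h>

/* Same macro bodies as in the vpx_dsp/vpx_dsp_common.h hunk above; only the
 * names carry the VPX prefix. */
#define VPXMIN(x, y) (((x) < (y)) ? (x) : (y))
#define VPXMAX(x, y) (((x) > (y)) ? (x) : (y))

/* Illustrative helper: clamp a per-frame bit target into
 * [min_target, max_rate], mirroring the shape of the rate-control call
 * sites touched by this patch. */
static int clamp_target(int target, int min_target, int max_rate) {
  target = VPXMIN(target, max_rate);  /* cap at the allowed maximum */
  return VPXMAX(target, min_target);  /* never drop below the floor */
}

int main(void) {
  /* Hypothetical inputs, chosen only to exercise both branches. */
  printf("%d\n", clamp_target(120000, 8000, 90000));  /* prints 90000 */
  printf("%d\n", clamp_target(2000, 8000, 90000));    /* prints 8000 */
  return 0;
}

Because the macro bodies are unchanged ternaries, each hunk is a pure rename at the call site; the expanded code is identical before and after the patch.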