/*
 *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>

#include "./vpx_config.h"
#include "vp9/common/vp9_common.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_lookahead.h"
#include "vp9/encoder/vp9_onyx_int.h"
/* Circular FIFO of raw frames awaiting encoding. `read_idx`/`write_idx`
 * chase each other around `buf`; `sz` tracks occupancy so full vs. empty
 * can be distinguished when the indices coincide. */
struct lookahead_ctx {
  unsigned int max_sz;         /* Absolute size of the queue */
  unsigned int sz;             /* Number of buffers currently in the queue */
  unsigned int read_idx;       /* Read index */
  unsigned int write_idx;      /* Write index */
  struct lookahead_entry *buf; /* Buffer list */
};
30 /* Return the buffer at the given absolute index and increment the index */
31 static struct lookahead_entry * pop(struct lookahead_ctx *ctx,
33 unsigned int index = *idx;
34 struct lookahead_entry *buf = ctx->buf + index;
36 assert(index < ctx->max_sz);
37 if (++index >= ctx->max_sz)
44 void vp9_lookahead_destroy(struct lookahead_ctx *ctx) {
49 for (i = 0; i < ctx->max_sz; i++)
50 vp9_free_frame_buffer(&ctx->buf[i].img);
58 struct lookahead_ctx * vp9_lookahead_init(unsigned int width,
60 unsigned int subsampling_x,
61 unsigned int subsampling_y,
63 struct lookahead_ctx *ctx = NULL;
65 // Clamp the lookahead queue depth
66 depth = clamp(depth, 1, MAX_LAG_BUFFERS);
68 // Allocate the lookahead structures
69 ctx = calloc(1, sizeof(*ctx));
73 ctx->buf = calloc(depth, sizeof(*ctx->buf));
76 for (i = 0; i < depth; i++)
77 if (vp9_alloc_frame_buffer(&ctx->buf[i].img,
78 width, height, subsampling_x, subsampling_y,
79 VP9_ENC_BORDER_IN_PIXELS))
84 vp9_lookahead_destroy(ctx);
88 #define USE_PARTIAL_COPY 0
90 int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
91 int64_t ts_start, int64_t ts_end, unsigned int flags,
92 unsigned char *active_map) {
93 struct lookahead_entry *buf;
95 int row, col, active_end;
96 int mb_rows = (src->y_height + 15) >> 4;
97 int mb_cols = (src->y_width + 15) >> 4;
100 if (ctx->sz + 1 > ctx->max_sz)
103 buf = pop(ctx, &ctx->write_idx);
106 // TODO(jkoleszar): This is disabled for now, as
107 // vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
109 // Only do this partial copy if the following conditions are all met:
110 // 1. Lookahead queue has has size of 1.
111 // 2. Active map is provided.
112 // 3. This is not a key frame, golden nor altref frame.
113 if (ctx->max_sz == 1 && active_map && !flags) {
114 for (row = 0; row < mb_rows; ++row) {
118 // Find the first active macroblock in this row.
119 for (; col < mb_cols; ++col) {
124 // No more active macroblock in this row.
128 // Find the end of active region in this row.
131 for (; active_end < mb_cols; ++active_end) {
132 if (!active_map[active_end])
136 // Only copy this active region.
137 vp9_copy_and_extend_frame_with_rect(src, &buf->img,
140 (active_end - col) << 4);
142 // Start again from the end of this active region.
146 active_map += mb_cols;
149 vp9_copy_and_extend_frame(src, &buf->img);
152 // Partial copy not implemented yet
153 vp9_copy_and_extend_frame(src, &buf->img);
156 buf->ts_start = ts_start;
157 buf->ts_end = ts_end;
163 struct lookahead_entry * vp9_lookahead_pop(struct lookahead_ctx *ctx,
165 struct lookahead_entry *buf = NULL;
167 if (ctx->sz && (drain || ctx->sz == ctx->max_sz)) {
168 buf = pop(ctx, &ctx->read_idx);
175 struct lookahead_entry * vp9_lookahead_peek(struct lookahead_ctx *ctx,
177 struct lookahead_entry *buf = NULL;
179 if (index < (int)ctx->sz) {
180 index += ctx->read_idx;
181 if (index >= (int)ctx->max_sz)
182 index -= ctx->max_sz;
183 buf = ctx->buf + index;
188 unsigned int vp9_lookahead_depth(struct lookahead_ctx *ctx) {