# For this we import the 'cpufeatures' module from the NDK sources.
# libvpx can also be configured without this runtime detection method.
# Configuring with --disable-runtime-cpu-detect will assume presence of NEON.
-# Configuring with --disable-runtime-cpu-detect --disable-neon will remove any
-# NEON dependency.
+# Configuring with --disable-runtime-cpu-detect --disable-neon \
+# --disable-neon-asm
+# will remove any NEON dependency.
# To change to building armeabi, run ./libvpx/configure again, but with
# --target=arm5te-android-gcc and modify the Application.mk file to
local makefile=$2
shift 2
for cfg; do
- upname="`toupper $cfg`"
if enabled $cfg; then
+ upname="`toupper $cfg`"
echo "${prefix}_${upname}=yes" >> $makefile
fi
done
unistd_h
"
EXPERIMENT_LIST="
- multiple_arf
alpha
+ multiple_arf
+ spatial_svc
"
CONFIG_LIST="
external_build
endif
vpxenc.GUID = 548DEC74-7A15-4B2B-AFC3-AA102E7C25C1
vpxenc.DESCRIPTION = Full featured encoder
-EXAMPLES-$(CONFIG_VP9_ENCODER) += vp9_spatial_svc_encoder.c
-vp9_spatial_svc_encoder.SRCS += args.c args.h
-vp9_spatial_svc_encoder.SRCS += ivfenc.c ivfenc.h
-vp9_spatial_svc_encoder.SRCS += tools_common.c tools_common.h
-vp9_spatial_svc_encoder.SRCS += video_common.h
-vp9_spatial_svc_encoder.SRCS += video_writer.h video_writer.c
-vp9_spatial_svc_encoder.SRCS += vpxstats.c vpxstats.h
-vp9_spatial_svc_encoder.GUID = 4A38598D-627D-4505-9C7B-D4020C84100D
-vp9_spatial_svc_encoder.DESCRIPTION = VP9 Spatial SVC Encoder
+ifeq ($(CONFIG_SPATIAL_SVC),yes)
+ EXAMPLES-$(CONFIG_VP9_ENCODER) += vp9_spatial_svc_encoder.c
+ vp9_spatial_svc_encoder.SRCS += args.c args.h
+ vp9_spatial_svc_encoder.SRCS += ivfenc.c ivfenc.h
+ vp9_spatial_svc_encoder.SRCS += tools_common.c tools_common.h
+ vp9_spatial_svc_encoder.SRCS += video_common.h
+ vp9_spatial_svc_encoder.SRCS += video_writer.h video_writer.c
+ vp9_spatial_svc_encoder.SRCS += vpxstats.c vpxstats.h
+ vp9_spatial_svc_encoder.GUID = 4A38598D-627D-4505-9C7B-D4020C84100D
+ vp9_spatial_svc_encoder.DESCRIPTION = VP9 Spatial SVC Encoder
+endif
ifneq ($(CONFIG_SHARED),yes)
EXAMPLES-$(CONFIG_VP9_ENCODER) += resize_util.c
static void set_roi_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_roi_map_t roi = {0};
+ vpx_roi_map_t roi;
+ memset(&roi, 0, sizeof(roi));
roi.rows = (cfg->g_h + 15) / 16;
roi.cols = (cfg->g_w + 15) / 16;
static void set_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
unsigned int i;
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
static void unset_active_map(const vpx_codec_enc_cfg_t *cfg,
vpx_codec_ctx_t *codec) {
- vpx_active_map_t map = {0};
+ vpx_active_map_t map = {0, 0, 0};
map.rows = (cfg->g_h + 15) / 16;
map.cols = (cfg->g_w + 15) / 16;
int main(int argc, char **argv) {
FILE *infile = NULL;
- vpx_codec_ctx_t codec = {0};
- vpx_codec_enc_cfg_t cfg = {0};
+ vpx_codec_ctx_t codec;
+ vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
- vpx_image_t raw = {0};
+ vpx_image_t raw;
vpx_codec_err_t res;
- VpxVideoInfo info = {0};
+ VpxVideoInfo info;
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 2; // TODO(dkovalev) add command line argument
const double bits_per_pixel_per_frame = 0.067;
exec_name = argv[0];
-
if (argc != 6)
die("Invalid number of arguments");
+ memset(&info, 0, sizeof(info));
+
encoder = get_vpx_encoder_by_name(argv[1]);
if (!encoder)
die("Unsupported codec.");
CODEC_EXPORTS-yes += $(addprefix $(VP9_PREFIX),$(VP9_CX_EXPORTS))
CODEC_SRCS-yes += $(VP9_PREFIX)vp9cx.mk vpx/vp8.h vpx/vp8cx.h
INSTALL-LIBS-yes += include/vpx/vp8.h include/vpx/vp8cx.h
- INSTALL-LIBS-yes += include/vpx/svc_context.h
+ INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/vpx/svc_context.h
INSTALL_MAPS += include/vpx/% $(SRC_PATH_BARE)/$(VP9_PREFIX)/%
CODEC_DOC_SRCS += vpx/vp8.h vpx/vp8cx.h
CODEC_DOC_SECTIONS += vp9 vp9_encoder
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
vp9_get_interp_kernel(static_cast<INTERP_FILTER>(filter_bank));
+ const InterpKernel *const eighttap_smooth =
+ vp9_get_interp_kernel(EIGHTTAP_SMOOTH);
+
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
filter_block2d_8_c(in, kInputStride,
ref, kOutputStride,
Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ if (filters == eighttap_smooth || (filter_x && filter_y))
REGISTER_STATE_CHECK(
UUT_->hv8_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
const InterpKernel *filters =
vp9_get_interp_kernel(static_cast<INTERP_FILTER>(filter_bank));
+ const InterpKernel *const eighttap_smooth =
+ vp9_get_interp_kernel(EIGHTTAP_SMOOTH);
for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
ref, kOutputStride,
Width(), Height());
- if (filters == vp9_sub_pel_filters_8lp || (filter_x && filter_y))
+ if (filters == eighttap_smooth || (filter_x && filter_y))
REGISTER_STATE_CHECK(
UUT_->hv8_avg_(in, kInputStride, out, kOutputStride,
filters[filter_x], 16, filters[filter_y], 16,
TEST_P(ConvolveTest, CheckScalingFiltering) {
uint8_t* const in = input();
uint8_t* const out = output();
+ const InterpKernel *const eighttap = vp9_get_interp_kernel(EIGHTTAP);
SetConstantInput(127);
for (int step = 1; step <= 32; ++step) {
/* Test the horizontal and vertical filters in combination. */
REGISTER_STATE_CHECK(UUT_->hv8_(in, kInputStride, out, kOutputStride,
- vp9_sub_pel_filters_8[frac], step,
- vp9_sub_pel_filters_8[frac], step,
+ eighttap[frac], step,
+ eighttap[frac], step,
Width(), Height()));
CheckGuardBlocks();
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += svc_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_subtract_test.cc
+ifeq ($(CONFIG_VP9_ENCODER),yes)
+LIBVPX_TEST_SRCS-$(CONFIG_SPATIAL_SVC) += svc_test.cc
+endif
+
endif # VP9
LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc
# functions and are run unconditionally. Functions in positional parameter two
# are run according to the rules specified in vpx_test_usage().
run_tests() {
- env_tests="verify_vpx_test_environment ${1}"
- tests_to_filter="${2}"
+ local env_tests="verify_vpx_test_environment $1"
+ local tests_to_filter="$2"
+ local test_name="${VPX_TEST_NAME}"
+
+ if [ -z "${test_name}" ]; then
+ test_name="$(basename \"${0%.*}\")"
+ fi
if [ "${VPX_TEST_RUN_DISABLED_TESTS}" != "yes" ]; then
# Filter out DISABLED tests.
tests_to_filter=$(filter_strings "${tests_to_filter}" ${VPX_TEST_FILTER})
fi
- tests_to_run="${env_tests} ${tests_to_filter}"
+ local tests_to_run="${env_tests} ${tests_to_filter}"
check_git_hashes
test_end "${test}"
done
- tested_config="$(test_configuration_target) @ $(current_hash)"
- echo $(basename "${0%.*}"): Done, all tests pass for ${tested_config}.
+ local tested_config="$(test_configuration_target) @ $(current_hash)"
+ echo "${test_name}: Done, all tests pass for ${tested_config}."
}
vpx_test_usage() {
--- /dev/null
+#!/bin/sh
+##
+## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+## This file tests the libvpx vp9_spatial_svc_encoder example. To add new
+## tests to this file, do the following:
+## 1. Write a shell function (this is your test).
+## 2. Add the function to vp9_spatial_svc_tests (on a new line).
+##
+. $(dirname $0)/tools_common.sh
+
+# Environment check: $YUV_RAW_INPUT is required.
+vp9_spatial_svc_encoder_verify_environment() {
+ if [ ! -e "${YUV_RAW_INPUT}" ]; then
+ echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+ return 1
+ fi
+}
+
+# Runs vp9_spatial_svc_encoder. $1 is the test name.
+vp9_spatial_svc_encoder() {
+ local encoder="${LIBVPX_BIN_PATH}/vp9_spatial_svc_encoder"
+ encoder="${encoder}${VPX_TEST_EXE_SUFFIX}"
+ local test_name="$1"
+ local output_file="${VPX_TEST_OUTPUT_DIR}/vp9_ssvc_encoder${test_name}.ivf"
+ local frames_to_encode="10"
+ local max_kf="9999"
+
+ shift
+
+ [ -x "${encoder}" ] || return 1
+
+ eval "${encoder}" -w "${YUV_RAW_INPUT_WIDTH}" -h "${YUV_RAW_INPUT_HEIGHT}" \
+ -k "${max_kf}" -f "${frames_to_encode}" "$@" "${YUV_RAW_INPUT}" \
+ "${output_file}" \
+ ${devnull}
+
+ [ -e "${output_file}" ] || return 1
+}
+
+# Each mode is run with layer count 1-$vp9_ssvc_test_layers.
+vp9_ssvc_test_layers=5
+
+vp9_spatial_svc_mode_i() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ local test_name="${FUNCNAME}"
+ for layers in $(seq 1 ${vp9_ssvc_test_layers}); do
+ vp9_spatial_svc_encoder "${test_name}" -m i -l ${layers}
+ done
+ fi
+}
+
+vp9_spatial_svc_mode_altip() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ local test_name="${FUNCNAME}"
+ for layers in $(seq 1 ${vp9_ssvc_test_layers}); do
+ vp9_spatial_svc_encoder "${test_name}" -m "alt-ip" -l ${layers}
+ done
+ fi
+}
+
+vp9_spatial_svc_mode_ip() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ local test_name="${FUNCNAME}"
+ vp9_spatial_svc_encoder "${test_name}" -m ip -l 1
+ fi
+}
+
+vp9_spatial_svc_mode_gf() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ local test_name="${FUNCNAME}"
+ for layers in $(seq 1 ${vp9_ssvc_test_layers}); do
+ vp9_spatial_svc_encoder "${test_name}" -m gf -l ${layers}
+ done
+ fi
+}
+
+vp9_spatial_svc_tests="vp9_spatial_svc_mode_i
+ vp9_spatial_svc_mode_altip
+ vp9_spatial_svc_mode_ip
+ vp9_spatial_svc_mode_gf"
+
+run_tests vp9_spatial_svc_encoder_verify_environment "${vp9_spatial_svc_tests}"
--- /dev/null
+#!/bin/sh
+##
+## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+##
+## Use of this source code is governed by a BSD-style license
+## that can be found in the LICENSE file in the root of the source
+## tree. An additional intellectual property rights grant can be found
+## in the file PATENTS. All contributing project authors may
+## be found in the AUTHORS file in the root of the source tree.
+##
+## This file tests the libvpx vpx_temporal_svc_encoder example. To add new
+## tests to this file, do the following:
+## 1. Write a shell function (this is your test).
+## 2. Add the function to vpx_tsvc_encoder_tests (on a new line).
+##
+. $(dirname $0)/tools_common.sh
+
+# Environment check: $YUV_RAW_INPUT is required.
+vpx_tsvc_encoder_verify_environment() {
+ if [ ! -e "${YUV_RAW_INPUT}" ]; then
+ echo "Libvpx test data must exist in LIBVPX_TEST_DATA_PATH."
+ return 1
+ fi
+}
+
+# Runs vpx_temporal_svc_encoder using the codec specified by $1 and the output
+# file name specified by $2. Additional positional parameters are passed
+# directly to vpx_temporal_svc_encoder.
+vpx_tsvc_encoder() {
+ local encoder="${LIBVPX_BIN_PATH}/vpx_temporal_svc_encoder"
+ encoder="${encoder}${VPX_TEST_EXE_SUFFIX}"
+ local codec="$1"
+ local output_file_base="$2"
+ local output_file="${VPX_TEST_OUTPUT_DIR}/${output_file_base}"
+ local timebase_num="1"
+ local timebase_den="1000"
+ local speed="6"
+ local frame_drop_thresh="30"
+
+ shift 2
+
+ [ -x "${encoder}" ] || return 1
+
+ eval "${encoder}" "${YUV_RAW_INPUT}" "${output_file}" "${codec}" \
+ "${YUV_RAW_INPUT_WIDTH}" "${YUV_RAW_INPUT_HEIGHT}" \
+ "${timebase_num}" "${timebase_den}" "${speed}" "${frame_drop_thresh}" \
+ "$@" \
+ ${devnull}
+}
+
+# Confirms that all expected output files exist given the output file name
+# passed to vpx_temporal_svc_encoder.
+# The file name passed to vpx_temporal_svc_encoder is joined with the stream
+# number and the extension .ivf to produce per stream output files. Here $1 is
+# file name, and $2 is expected number of files.
+files_exist() {
+ local file_name="${VPX_TEST_OUTPUT_DIR}/$1"
+ local num_files="$(($2 - 1))"
+ for stream_num in $(seq 0 ${num_files}); do
+ [ -e "${file_name}_${stream_num}.ivf" ] || return 1
+ done
+}
+
+# Run vpx_temporal_svc_encoder in all supported modes for vp8 and vp9.
+
+vpx_tsvc_encoder_vp8_mode_0() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 0 200 || return 1
+ # Mode 0 produces 1 stream
+ files_exist "${FUNCNAME}" 1 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_1() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 1 200 400 || return 1
+ # Mode 1 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_2() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 2 200 400 || return 1
+ # Mode 2 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_3() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 3 200 400 600 || return 1
+ # Mode 3 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_4() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 4 200 400 600 || return 1
+ # Mode 4 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_5() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 5 200 400 600 || return 1
+ # Mode 5 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_6() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 6 200 400 600 || return 1
+ # Mode 6 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_7() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 7 200 400 600 800 1000 || return 1
+ # Mode 7 produces 5 streams
+ files_exist "${FUNCNAME}" 5 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_8() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 8 200 400 || return 1
+ # Mode 8 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_9() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 9 200 400 600 || return 1
+ # Mode 9 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_10() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 10 200 400 600 || return 1
+ # Mode 10 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp8_mode_11() {
+ if [ "$(vp8_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp8 "${FUNCNAME}" 11 200 400 600 || return 1
+ # Mode 11 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_0() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 0 200 || return 1
+ # Mode 0 produces 1 stream
+ files_exist "${FUNCNAME}" 1 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_1() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 1 200 400 || return 1
+ # Mode 1 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_2() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 2 200 400 || return 1
+ # Mode 2 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_3() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 3 200 400 600 || return 1
+ # Mode 3 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_4() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 4 200 400 600 || return 1
+ # Mode 4 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_5() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 5 200 400 600 || return 1
+ # Mode 5 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_6() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 6 200 400 600 || return 1
+ # Mode 6 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_7() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 7 200 400 600 800 1000 || return 1
+ # Mode 7 produces 5 streams
+ files_exist "${FUNCNAME}" 5 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_8() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 8 200 400 || return 1
+ # Mode 8 produces 2 streams
+ files_exist "${FUNCNAME}" 2 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_9() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 9 200 400 600 || return 1
+ # Mode 9 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_10() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 10 200 400 600 || return 1
+ # Mode 10 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_vp9_mode_11() {
+ if [ "$(vp9_encode_available)" = "yes" ]; then
+ vpx_tsvc_encoder vp9 "${FUNCNAME}" 11 200 400 600 || return 1
+ # Mode 11 produces 3 streams
+ files_exist "${FUNCNAME}" 3 || return 1
+ fi
+}
+
+vpx_tsvc_encoder_tests="vpx_tsvc_encoder_vp8_mode_0
+ vpx_tsvc_encoder_vp8_mode_1
+ vpx_tsvc_encoder_vp8_mode_2
+ vpx_tsvc_encoder_vp8_mode_3
+ vpx_tsvc_encoder_vp8_mode_4
+ vpx_tsvc_encoder_vp8_mode_5
+ vpx_tsvc_encoder_vp8_mode_6
+ vpx_tsvc_encoder_vp8_mode_7
+ vpx_tsvc_encoder_vp8_mode_8
+ vpx_tsvc_encoder_vp8_mode_9
+ vpx_tsvc_encoder_vp8_mode_10
+ vpx_tsvc_encoder_vp8_mode_11
+ vpx_tsvc_encoder_vp9_mode_0
+ vpx_tsvc_encoder_vp9_mode_1
+ vpx_tsvc_encoder_vp9_mode_2
+ vpx_tsvc_encoder_vp9_mode_3
+ vpx_tsvc_encoder_vp9_mode_4
+ vpx_tsvc_encoder_vp9_mode_5
+ vpx_tsvc_encoder_vp9_mode_6
+ vpx_tsvc_encoder_vp9_mode_7
+ vpx_tsvc_encoder_vp9_mode_8
+ vpx_tsvc_encoder_vp9_mode_9
+ vpx_tsvc_encoder_vp9_mode_10
+ vpx_tsvc_encoder_vp9_mode_11"
+
+run_tests vpx_tsvc_encoder_verify_environment "${vpx_tsvc_encoder_tests}"
%assign n_arg_names %0
%endmacro
+%if ARCH_X86_64
+%macro ALLOC_STACK 2 ; stack_size, num_regs
+ %assign %%stack_aligment ((mmsize + 15) & ~15)
+ %assign stack_size_padded %1
+
+ %assign %%reg_num (%2 - 1)
+ %xdefine rsp_tmp r %+ %%reg_num
+ mov rsp_tmp, rsp
+ sub rsp, stack_size_padded
+ and rsp, ~(%%stack_aligment - 1)
+%endmacro
+
+%macro RESTORE_STACK 0 ; reset rsp register
+ mov rsp, rsp_tmp
+%endmacro
+%endif
+
%if WIN64 ; Windows x64 ;=================================================
DECLARE_REG 0, rcx, ecx, cx, cl
enum VideoFileType file_type;
uint32_t width;
uint32_t height;
- int use_i420;
+ vpx_img_fmt_t fmt;
int only_i420;
uint32_t fourcc;
struct VpxRational framerate;
#include "vpx_config.h"
#include "vp8/common/blockd.h"
-#if HAVE_NEON_ASM
-extern void vp8_dequantize_b_loop_neon(short *Q, short *DQC, short *DQ);
-#endif
-
#if HAVE_MEDIA
extern void vp8_dequantize_b_loop_v6(short *Q, short *DQC, short *DQ);
-#endif
-
-#if HAVE_NEON_ASM
-
-void vp8_dequantize_b_neon(BLOCKD *d, short *DQC)
-{
- short *DQ = d->dqcoeff;
- short *Q = d->qcoeff;
-
- vp8_dequantize_b_loop_neon(Q, DQC, DQ);
-}
-#endif
-#if HAVE_MEDIA
void vp8_dequantize_b_v6(BLOCKD *d, short *DQC)
{
short *DQ = d->dqcoeff;
#include <arm_neon.h>
-void vp8_dequantize_b_loop_neon(
- int16_t *Q,
- int16_t *DQC,
- int16_t *DQ) {
+#include "vp8/common/blockd.h"
+
+void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
int16x8x2_t qQ, qDQC, qDQ;
- qQ = vld2q_s16(Q);
+ qQ = vld2q_s16(d->qcoeff);
qDQC = vld2q_s16(DQC);
qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
- vst2q_s16(DQ, qDQ);
- return;
+ vst2q_s16(d->dqcoeff, qDQ);
}
#include "vp8/common/blockd.h"
#include "vpx_mem/vpx_mem.h"
-#if HAVE_NEON_ARM
+#if HAVE_NEON_ASM
extern void vp8_build_intra_predictors_mby_neon_func(
unsigned char *y_buffer,
unsigned char *ypred_ptr,
# Dequant
#
add_proto qw/void vp8_dequantize_b/, "struct blockd*, short *dqc";
-specialize qw/vp8_dequantize_b mmx media neon_asm/;
+specialize qw/vp8_dequantize_b mmx media neon/;
$vp8_dequantize_b_media=vp8_dequantize_b_v6;
-$vp8_dequantize_b_neon_asm=vp8_dequantize_b_neon;
add_proto qw/void vp8_dequant_idct_add/, "short *input, short *dq, unsigned char *output, int stride";
specialize qw/vp8_dequant_idct_add mmx media neon dspr2/;
# Denoiser filter
#
if (vpx_config("CONFIG_TEMPORAL_DENOISING") eq "yes") {
- add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude";
+ add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising";
specialize qw/vp8_denoiser_filter sse2 neon/;
}
movsxd rdx, dword ptr arg(3) ;[recon_stride]
; Row 1
- movq mm0, [rax] ; Copy eight bytes to mm0
- movq mm1, [rbx] ; Copy eight bytes to mm1
+ movd mm0, [rax] ; Copy four bytes to mm0
+ movd mm1, [rbx] ; Copy four bytes to mm1
punpcklbw mm0, mm6 ; unpack to higher prrcision
punpcklbw mm1, mm6
psubsw mm0, mm1 ; A-B (low order) to MM0
pmaddwd mm0, mm0 ; square and accumulate
add rbx,rdx ; Inc pointer into ref data
add rax,rcx ; Inc pointer into the new data
- movq mm1, [rbx] ; Copy eight bytes to mm1
+ movd mm1, [rbx] ; Copy four bytes to mm1
paddd mm7, mm0 ; accumulate in mm7
; Row 2
- movq mm0, [rax] ; Copy eight bytes to mm0
+ movd mm0, [rax] ; Copy four bytes to mm0
punpcklbw mm0, mm6 ; unpack to higher prrcision
punpcklbw mm1, mm6
psubsw mm0, mm1 ; A-B (low order) to MM0
pmaddwd mm0, mm0 ; square and accumulate
add rbx,rdx ; Inc pointer into ref data
add rax,rcx ; Inc pointer into the new data
- movq mm1, [rbx] ; Copy eight bytes to mm1
+ movd mm1, [rbx] ; Copy four bytes to mm1
paddd mm7, mm0 ; accumulate in mm7
; Row 3
- movq mm0, [rax] ; Copy eight bytes to mm0
- punpcklbw mm0, mm6 ; unpack to higher prrcision
+ movd mm0, [rax] ; Copy four bytes to mm0
+ punpcklbw mm0, mm6 ; unpack to higher precision
punpcklbw mm1, mm6
psubsw mm0, mm1 ; A-B (low order) to MM0
paddw mm5, mm0 ; accumulate differences in mm5
pmaddwd mm0, mm0 ; square and accumulate
add rbx,rdx ; Inc pointer into ref data
add rax,rcx ; Inc pointer into the new data
- movq mm1, [rbx] ; Copy eight bytes to mm1
+ movd mm1, [rbx] ; Copy four bytes to mm1
paddd mm7, mm0 ; accumulate in mm7
; Row 4
- movq mm0, [rax] ; Copy eight bytes to mm0
+ movd mm0, [rax] ; Copy four bytes to mm0
punpcklbw mm0, mm6 ; unpack to higher prrcision
punpcklbw mm1, mm6
unsigned char *running_avg_y,
int running_avg_y_stride,
unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude) {
+ unsigned int motion_magnitude,
+ int increase_denoising) {
/* If motion_magnitude is small, making the denoiser more aggressive by
* increasing the adjustment for each level, level1 adjustment is
* increased, the deltas stay the same.
int optimize;
int q_index;
+ int increase_denoising;
#if CONFIG_TEMPORAL_DENOISING
MB_PREDICTION_MODE best_sse_inter_mode;
*/
static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20;
static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
+static const unsigned int SSE_THRESHOLD_HIGH = 16 * 16 * 60;
/*
* The filter function was modified to reduce the computational complexity.
int vp8_denoiser_filter_c(unsigned char *mc_running_avg_y, int mc_avg_y_stride,
unsigned char *running_avg_y, int avg_y_stride,
unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude)
+ unsigned int motion_magnitude,
+ int increase_denoising)
{
unsigned char *running_avg_y_start = running_avg_y;
unsigned char *sig_start = sig;
- int r, c, i;
+ int sum_diff_thresh;
+ int r, c;
int sum_diff = 0;
int adj_val[3] = {3, 4, 6};
-
+ int shift_inc1 = 0;
+ int shift_inc2 = 1;
/* If motion_magnitude is small, making the denoiser more aggressive by
- * increasing the adjustment for each level. */
+ * increasing the adjustment for each level. Add another increment for
+ * blocks that are labeled for increase denoising. */
if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
{
- for (i = 0; i < 3; i++)
- adj_val[i] += 1;
+ if (increase_denoising) {
+ shift_inc1 = 1;
+ shift_inc2 = 2;
+ }
+ adj_val[0] += shift_inc2;
+ adj_val[1] += shift_inc2;
+ adj_val[2] += shift_inc2;
}
for (r = 0; r < 16; ++r)
diff = mc_running_avg_y[c] - sig[c];
absdiff = abs(diff);
- /* When |diff| < 4, use pixel value from last denoised raw. */
- if (absdiff <= 3)
+ // When |diff| <= |3 + shift_inc1|, use pixel value from
+ // last denoised raw.
+ if (absdiff <= 3 + shift_inc1)
{
running_avg_y[c] = mc_running_avg_y[c];
sum_diff += diff;
running_avg_y += avg_y_stride;
}
- if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
+ sum_diff_thresh= SUM_DIFF_THRESHOLD;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+ if (abs(sum_diff) > sum_diff_thresh)
return COPY_BLOCK;
vp8_copy_mem16x16(running_avg_y_start, avg_y_stride, sig_start, sig_stride);
int mv_row;
int mv_col;
unsigned int motion_magnitude2;
-
+ unsigned int sse_thresh;
MV_REFERENCE_FRAME frame = x->best_reference_frame;
MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame;
mv_row = x->best_sse_mv.as_mv.row;
mv_col = x->best_sse_mv.as_mv.col;
motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;
- if (best_sse > SSE_THRESHOLD || motion_magnitude2
+ sse_thresh = SSE_THRESHOLD;
+ if (x->increase_denoising) sse_thresh = SSE_THRESHOLD_HIGH;
+
+ if (best_sse > sse_thresh || motion_magnitude2
> 8 * NOISE_MOTION_THRESHOLD)
{
decision = COPY_BLOCK;
/* Filter. */
decision = vp8_denoiser_filter(mc_running_avg_y, mc_avg_y_stride,
running_avg_y, avg_y_stride,
- x->thismb, 16, motion_magnitude2);
+ x->thismb, 16, motion_magnitude2,
+ x->increase_denoising);
}
if (decision == COPY_BLOCK)
{
#endif
#define SUM_DIFF_THRESHOLD (16 * 16 * 2)
+#define SUM_DIFF_THRESHOLD_HIGH (16 * 16 * 3)
#define MOTION_MAGNITUDE_THRESHOLD (8*3)
enum vp8_denoiser_decision
x->best_reference_frame = best_mbmode.ref_frame;
best_sse = best_rd_sse;
}
+ x->increase_denoising = 0;
vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
recon_yoffset, recon_uvoffset);
int mc_avg_y_stride,
unsigned char *running_avg_y, int avg_y_stride,
unsigned char *sig, int sig_stride,
- unsigned int motion_magnitude)
+ unsigned int motion_magnitude,
+ int increase_denoising)
{
unsigned char *running_avg_y_start = running_avg_y;
unsigned char *sig_start = sig;
+ int sum_diff_thresh;
int r;
+ int shift_inc = (increase_denoising &&
+ motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 1 : 0;
__m128i acc_diff = _mm_setzero_si128();
const __m128i k_0 = _mm_setzero_si128();
- const __m128i k_4 = _mm_set1_epi8(4);
+ const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
const __m128i k_8 = _mm_set1_epi8(8);
const __m128i k_16 = _mm_set1_epi8(16);
/* Modify each level's adjustment according to motion_magnitude. */
const __m128i l3 = _mm_set1_epi8(
- (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 : 6);
+ (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ?
+ 7 + shift_inc : 6);
/* Difference between level 3 and level 2 is 2. */
const __m128i l32 = _mm_set1_epi8(2);
/* Difference between level 2 and level 1 is 1. */
+ s.e[6] + s.e[7] + s.e[8] + s.e[9] + s.e[10] + s.e[11]
+ s.e[12] + s.e[13] + s.e[14] + s.e[15];
- if (abs(sum_diff) > SUM_DIFF_THRESHOLD)
+ sum_diff_thresh = SUM_DIFF_THRESHOLD;
+ if (increase_denoising) sum_diff_thresh = SUM_DIFF_THRESHOLD_HIGH;
+ if (abs(sum_diff) > sum_diff_thresh)
{
return COPY_BLOCK;
}
VP8_COMMON_SRCS-$(HAVE_MEDIA) += common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6$(ASM)
# common (neon)
-VP8_COMMON_SRCS-$(ARCH_NEON_ASM) += common/arm/reconintra_arm.c
+#VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/reconintra_arm.c
VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/loopfilter_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/loopfiltersimpleverticaledge_neon$(ASM)
-VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/buildintrapredictorsmby_neon$(ASM)
+#VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/buildintrapredictorsmby_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/idct_blk_neon.c
VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/idct_dequant_0_2x_neon$(ASM)
VP8_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/idct_dequant_full_2x_neon$(ASM)
};
// Lagrangian interpolation filter
-const InterpKernel vp9_sub_pel_filters_8[SUBPEL_SHIFTS] = {
+DECLARE_ALIGNED(256, const InterpKernel,
+ vp9_sub_pel_filters_8[SUBPEL_SHIFTS]) = {
{ 0, 0, 0, 128, 0, 0, 0, 0},
{ 0, 1, -5, 126, 8, -3, 1, 0},
{ -1, 3, -10, 122, 18, -6, 2, 0},
};
// DCT based filter
-const InterpKernel vp9_sub_pel_filters_8s[SUBPEL_SHIFTS] = {
+DECLARE_ALIGNED(256, const InterpKernel,
+ vp9_sub_pel_filters_8s[SUBPEL_SHIFTS]) = {
{0, 0, 0, 128, 0, 0, 0, 0},
{-1, 3, -7, 127, 8, -3, 1, 0},
{-2, 5, -13, 125, 17, -6, 3, -1},
};
// freqmultiplier = 0.5
-const InterpKernel vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS] = {
+DECLARE_ALIGNED(256, const InterpKernel,
+ vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS]) = {
{ 0, 0, 0, 128, 0, 0, 0, 0},
{-3, -1, 32, 64, 38, 1, -3, 0},
{-2, -2, 29, 63, 41, 2, -3, 0},
DECLARE_ALIGNED(256, extern const InterpKernel,
vp9_bilinear_filters[SUBPEL_SHIFTS]);
-DECLARE_ALIGNED(256, extern const InterpKernel,
- vp9_sub_pel_filters_8[SUBPEL_SHIFTS]);
-DECLARE_ALIGNED(256, extern const InterpKernel,
- vp9_sub_pel_filters_8s[SUBPEL_SHIFTS]);
-DECLARE_ALIGNED(256, extern const InterpKernel,
- vp9_sub_pel_filters_8lp[SUBPEL_SHIFTS]);
// The VP9_BILINEAR_FILTERS_2TAP macro returns a pointer to the bilinear
// filter kernel as a 2 tap filter.
}
void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
- VP9_COMMON *cm, MACROBLOCKD *xd,
+ VP9_COMMON *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
- const int use_420 = y_only || (xd->plane[1].subsampling_y == 1 &&
- xd->plane[1].subsampling_x == 1);
+ const int use_420 = y_only || (planes[1].subsampling_y == 1 &&
+ planes[1].subsampling_x == 1);
LOOP_FILTER_MASK lfm;
int mi_row, mi_col;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
int plane;
- vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col);
+ vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
// TODO(JBB): Make setup_mask work for non 420.
if (use_420)
for (plane = 0; plane < num_planes; ++plane) {
if (use_420)
- vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm);
+ vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
else
- filter_block_plane_non420(cm, &xd->plane[plane], mi + mi_col,
+ filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
mi_row, mi_col);
}
}
}
end_mi_row = start_mi_row + mi_rows_to_filter;
vp9_loop_filter_frame_init(cm, frame_filter_level);
- vp9_loop_filter_rows(frame, cm, xd,
+ vp9_loop_filter_rows(frame, cm, xd->plane,
start_mi_row, end_mi_row,
y_only);
}
int vp9_loop_filter_worker(void *arg1, void *arg2) {
LFWorkerData *const lf_data = (LFWorkerData*)arg1;
(void)arg2;
- vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, &lf_data->xd,
+ vp9_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
lf_data->start, lf_data->stop, lf_data->y_only);
return 1;
}
// Apply the loop filter to [start, stop) macro block rows in frame_buffer.
void vp9_loop_filter_rows(const YV12_BUFFER_CONFIG *frame_buffer,
- struct VP9Common *cm, struct macroblockd *xd,
+ struct VP9Common *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only);
typedef struct LoopFilterWorkerData {
const YV12_BUFFER_CONFIG *frame_buffer;
struct VP9Common *cm;
- struct macroblockd xd; // TODO(jzern): most of this is unnecessary to the
- // loopfilter. the planes are necessary as their state
- // is changed during decode.
+ struct macroblockd_plane planes[MAX_MB_PLANE];
+
int start;
int stop;
int y_only;
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_textblit.h"
-#define RGB_TO_YUV(t) \
- ( (0.257*(float)(t >> 16)) + (0.504*(float)(t >> 8 & 0xff)) + \
- (0.098*(float)(t & 0xff)) + 16), \
- (-(0.148*(float)(t >> 16)) - (0.291*(float)(t >> 8 & 0xff)) + \
- (0.439*(float)(t & 0xff)) + 128), \
- ( (0.439*(float)(t >> 16)) - (0.368*(float)(t >> 8 & 0xff)) - \
- (0.071*(float)(t & 0xff)) + 128)
-
-/* global constants */
-#if 0 && CONFIG_POSTPROC_VISUALIZER
-static const unsigned char PREDICTION_MODE_colors[MB_MODE_COUNT][3] = {
- { RGB_TO_YUV(0x98FB98) }, /* PaleGreen */
- { RGB_TO_YUV(0x00FF00) }, /* Green */
- { RGB_TO_YUV(0xADFF2F) }, /* GreenYellow */
- { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
- { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
- { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
- { RGB_TO_YUV(0x008F8F) }, /* Dark Cyan */
- { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
- { RGB_TO_YUV(0x8F0000) }, /* Dark Red */
- { RGB_TO_YUV(0x228B22) }, /* ForestGreen */
- { RGB_TO_YUV(0x006400) }, /* DarkGreen */
- { RGB_TO_YUV(0x98F5FF) }, /* Cadet Blue */
- { RGB_TO_YUV(0x6CA6CD) }, /* Sky Blue */
- { RGB_TO_YUV(0x00008B) }, /* Dark blue */
- { RGB_TO_YUV(0x551A8B) }, /* Purple */
- { RGB_TO_YUV(0xFF0000) } /* Red */
- { RGB_TO_YUV(0xCC33FF) }, /* Magenta */
-};
-
-static const unsigned char B_PREDICTION_MODE_colors[INTRA_MODES][3] = {
- { RGB_TO_YUV(0x6633ff) }, /* Purple */
- { RGB_TO_YUV(0xcc33ff) }, /* Magenta */
- { RGB_TO_YUV(0xff33cc) }, /* Pink */
- { RGB_TO_YUV(0xff3366) }, /* Coral */
- { RGB_TO_YUV(0x3366ff) }, /* Blue */
- { RGB_TO_YUV(0xed00f5) }, /* Dark Blue */
- { RGB_TO_YUV(0x2e00b8) }, /* Dark Purple */
- { RGB_TO_YUV(0xff6633) }, /* Orange */
- { RGB_TO_YUV(0x33ccff) }, /* Light Blue */
- { RGB_TO_YUV(0x8ab800) }, /* Green */
- { RGB_TO_YUV(0xffcc33) }, /* Light Orange */
- { RGB_TO_YUV(0x33ffcc) }, /* Aqua */
- { RGB_TO_YUV(0x66ff33) }, /* Light Green */
- { RGB_TO_YUV(0xccff33) }, /* Yellow */
-};
-
-static const unsigned char MV_REFERENCE_FRAME_colors[MAX_REF_FRAMES][3] = {
- { RGB_TO_YUV(0x00ff00) }, /* Blue */
- { RGB_TO_YUV(0x0000ff) }, /* Green */
- { RGB_TO_YUV(0xffff00) }, /* Yellow */
- { RGB_TO_YUV(0xff0000) }, /* Red */
-};
-#endif
-
+#if CONFIG_VP9_POSTPROC
static const short kernel5[] = {
1, 1, 4, 1, 1
};
}
}
-/* Blend the macro block with a solid colored square. Leave the
- * edges unblended to give distinction to macro blocks in areas
- * filled with the same color block.
- */
-void vp9_blend_mb_inner_c(uint8_t *y, uint8_t *u, uint8_t *v,
- int y1, int u1, int v1, int alpha, int stride) {
- int i, j;
- int y1_const = y1 * ((1 << 16) - alpha);
- int u1_const = u1 * ((1 << 16) - alpha);
- int v1_const = v1 * ((1 << 16) - alpha);
-
- y += 2 * stride + 2;
- for (i = 0; i < 12; i++) {
- for (j = 0; j < 12; j++) {
- y[j] = (y[j] * alpha + y1_const) >> 16;
- }
- y += stride;
- }
-
- stride >>= 1;
-
- u += stride + 1;
- v += stride + 1;
-
- for (i = 0; i < 6; i++) {
- for (j = 0; j < 6; j++) {
- u[j] = (u[j] * alpha + u1_const) >> 16;
- v[j] = (v[j] * alpha + v1_const) >> 16;
- }
- u += stride;
- v += stride;
- }
-}
-
-/* Blend only the edge of the macro block. Leave center
- * unblended to allow for other visualizations to be layered.
- */
-void vp9_blend_mb_outer_c(uint8_t *y, uint8_t *u, uint8_t *v,
- int y1, int u1, int v1, int alpha, int stride) {
- int i, j;
- int y1_const = y1 * ((1 << 16) - alpha);
- int u1_const = u1 * ((1 << 16) - alpha);
- int v1_const = v1 * ((1 << 16) - alpha);
-
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 16; j++) {
- y[j] = (y[j] * alpha + y1_const) >> 16;
- }
- y += stride;
- }
-
- for (i = 0; i < 12; i++) {
- y[0] = (y[0] * alpha + y1_const) >> 16;
- y[1] = (y[1] * alpha + y1_const) >> 16;
- y[14] = (y[14] * alpha + y1_const) >> 16;
- y[15] = (y[15] * alpha + y1_const) >> 16;
- y += stride;
- }
-
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 16; j++) {
- y[j] = (y[j] * alpha + y1_const) >> 16;
- }
- y += stride;
- }
-
- stride >>= 1;
-
- for (j = 0; j < 8; j++) {
- u[j] = (u[j] * alpha + u1_const) >> 16;
- v[j] = (v[j] * alpha + v1_const) >> 16;
- }
- u += stride;
- v += stride;
-
- for (i = 0; i < 6; i++) {
- u[0] = (u[0] * alpha + u1_const) >> 16;
- v[0] = (v[0] * alpha + v1_const) >> 16;
-
- u[7] = (u[7] * alpha + u1_const) >> 16;
- v[7] = (v[7] * alpha + v1_const) >> 16;
-
- u += stride;
- v += stride;
- }
-
- for (j = 0; j < 8; j++) {
- u[j] = (u[j] * alpha + u1_const) >> 16;
- v[j] = (v[j] * alpha + v1_const) >> 16;
- }
-}
-
-void vp9_blend_b_c(uint8_t *y, uint8_t *u, uint8_t *v,
- int y1, int u1, int v1, int alpha, int stride) {
- int i, j;
- int y1_const = y1 * ((1 << 16) - alpha);
- int u1_const = u1 * ((1 << 16) - alpha);
- int v1_const = v1 * ((1 << 16) - alpha);
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 4; j++) {
- y[j] = (y[j] * alpha + y1_const) >> 16;
- }
- y += stride;
- }
-
- stride >>= 1;
-
- for (i = 0; i < 2; i++) {
- for (j = 0; j < 2; j++) {
- u[j] = (u[j] * alpha + u1_const) >> 16;
- v[j] = (v[j] * alpha + v1_const) >> 16;
- }
- u += stride;
- v += stride;
- }
-}
-
-static void constrain_line(int x0, int *x1, int y0, int *y1,
- int width, int height) {
- int dx;
- int dy;
-
- if (*x1 > width) {
- dx = *x1 - x0;
- dy = *y1 - y0;
-
- *x1 = width;
- if (dx)
- *y1 = ((width - x0) * dy) / dx + y0;
- }
- if (*x1 < 0) {
- dx = *x1 - x0;
- dy = *y1 - y0;
-
- *x1 = 0;
- if (dx)
- *y1 = ((0 - x0) * dy) / dx + y0;
- }
- if (*y1 > height) {
- dx = *x1 - x0;
- dy = *y1 - y0;
-
- *y1 = height;
- if (dy)
- *x1 = ((height - y0) * dx) / dy + x0;
- }
- if (*y1 < 0) {
- dx = *x1 - x0;
- dy = *y1 - y0;
-
- *y1 = 0;
- if (dy)
- *x1 = ((0 - y0) * dx) / dy + x0;
- }
-}
-
int vp9_post_proc_frame(struct VP9Common *cm,
YV12_BUFFER_CONFIG *dest, vp9_ppflags_t *ppflags) {
const int q = MIN(63, cm->lf.filter_level * 10 / 6);
ppbuf->y_width, ppbuf->y_height, ppbuf->y_stride);
}
-#if 0 && CONFIG_POSTPROC_VISUALIZER
- if (flags & VP9D_DEBUG_TXT_FRAME_INFO) {
- char message[512];
- snprintf(message, sizeof(message) -1,
- "F%1dG%1dQ%3dF%3dP%d_s%dx%d",
- (cm->frame_type == KEY_FRAME),
- cm->refresh_golden_frame,
- cm->base_qindex,
- cm->filter_level,
- flags,
- cm->mb_cols, cm->mb_rows);
- vp9_blit_text(message, ppbuf->y_buffer, ppbuf->y_stride);
- }
-
- if (flags & VP9D_DEBUG_TXT_MBLK_MODES) {
- int i, j;
- uint8_t *y_ptr;
- int mb_rows = ppbuf->y_height >> 4;
- int mb_cols = ppbuf->y_width >> 4;
- int mb_index = 0;
- MODE_INFO *mi = cm->mi;
-
- y_ptr = post->y_buffer + 4 * post->y_stride + 4;
-
- /* vp9_filter each macro block */
- for (i = 0; i < mb_rows; i++) {
- for (j = 0; j < mb_cols; j++) {
- char zz[4];
-
- snprintf(zz, sizeof(zz) - 1, "%c", mi[mb_index].mbmi.mode + 'a');
-
- vp9_blit_text(zz, y_ptr, post->y_stride);
- mb_index++;
- y_ptr += 16;
- }
-
- mb_index++; /* border */
- y_ptr += post->y_stride * 16 - post->y_width;
- }
- }
-
- if (flags & VP9D_DEBUG_TXT_DC_DIFF) {
- int i, j;
- uint8_t *y_ptr;
- int mb_rows = ppbuf->y_height >> 4;
- int mb_cols = ppbuf->y_width >> 4;
- int mb_index = 0;
- MODE_INFO *mi = cm->mi;
-
- y_ptr = post->y_buffer + 4 * post->y_stride + 4;
-
- /* vp9_filter each macro block */
- for (i = 0; i < mb_rows; i++) {
- for (j = 0; j < mb_cols; j++) {
- char zz[4];
- int dc_diff = !(mi[mb_index].mbmi.mode != I4X4_PRED &&
- mi[mb_index].mbmi.mode != SPLITMV &&
- mi[mb_index].mbmi.skip);
-
- if (cm->frame_type == KEY_FRAME)
- snprintf(zz, sizeof(zz) - 1, "a");
- else
- snprintf(zz, sizeof(zz) - 1, "%c", dc_diff + '0');
-
- vp9_blit_text(zz, y_ptr, post->y_stride);
- mb_index++;
- y_ptr += 16;
- }
-
- mb_index++; /* border */
- y_ptr += post->y_stride * 16 - post->y_width;
- }
- }
-
- if (flags & VP9D_DEBUG_TXT_RATE_INFO) {
- char message[512];
- snprintf(message, sizeof(message),
- "Bitrate: %10.2f framerate: %10.2f ",
- cm->bitrate, cm->framerate);
- vp9_blit_text(message, ppbuf->y_buffer, ppbuf->y_stride);
- }
-
- /* Draw motion vectors */
- if ((flags & VP9D_DEBUG_DRAW_MV) && ppflags->display_mv_flag) {
- int width = ppbuf->y_width;
- int height = ppbuf->y_height;
- uint8_t *y_buffer = ppbuf->y_buffer;
- int y_stride = ppbuf->y_stride;
- MODE_INFO *mi = cm->mi;
- int x0, y0;
-
- for (y0 = 0; y0 < height; y0 += 16) {
- for (x0 = 0; x0 < width; x0 += 16) {
- int x1, y1;
-
- if (!(ppflags->display_mv_flag & (1 << mi->mbmi.mode))) {
- mi++;
- continue;
- }
-
- if (mi->mbmi.mode == SPLITMV) {
- switch (mi->mbmi.partitioning) {
- case PARTITIONING_16X8 : { /* mv_top_bottom */
- union b_mode_info *bmi = &mi->bmi[0];
- MV *mv = &bmi->mv.as_mv;
-
- x1 = x0 + 8 + (mv->col >> 3);
- y1 = y0 + 4 + (mv->row >> 3);
-
- constrain_line(x0 + 8, &x1, y0 + 4, &y1, width, height);
- vp9_blit_line(x0 + 8, x1, y0 + 4, y1, y_buffer, y_stride);
-
- bmi = &mi->bmi[8];
-
- x1 = x0 + 8 + (mv->col >> 3);
- y1 = y0 + 12 + (mv->row >> 3);
-
- constrain_line(x0 + 8, &x1, y0 + 12, &y1, width, height);
- vp9_blit_line(x0 + 8, x1, y0 + 12, y1, y_buffer, y_stride);
-
- break;
- }
- case PARTITIONING_8X16 : { /* mv_left_right */
- union b_mode_info *bmi = &mi->bmi[0];
- MV *mv = &bmi->mv.as_mv;
-
- x1 = x0 + 4 + (mv->col >> 3);
- y1 = y0 + 8 + (mv->row >> 3);
-
- constrain_line(x0 + 4, &x1, y0 + 8, &y1, width, height);
- vp9_blit_line(x0 + 4, x1, y0 + 8, y1, y_buffer, y_stride);
-
- bmi = &mi->bmi[2];
-
- x1 = x0 + 12 + (mv->col >> 3);
- y1 = y0 + 8 + (mv->row >> 3);
-
- constrain_line(x0 + 12, &x1, y0 + 8, &y1, width, height);
- vp9_blit_line(x0 + 12, x1, y0 + 8, y1, y_buffer, y_stride);
-
- break;
- }
- case PARTITIONING_8X8 : { /* mv_quarters */
- union b_mode_info *bmi = &mi->bmi[0];
- MV *mv = &bmi->mv.as_mv;
-
- x1 = x0 + 4 + (mv->col >> 3);
- y1 = y0 + 4 + (mv->row >> 3);
-
- constrain_line(x0 + 4, &x1, y0 + 4, &y1, width, height);
- vp9_blit_line(x0 + 4, x1, y0 + 4, y1, y_buffer, y_stride);
-
- bmi = &mi->bmi[2];
-
- x1 = x0 + 12 + (mv->col >> 3);
- y1 = y0 + 4 + (mv->row >> 3);
-
- constrain_line(x0 + 12, &x1, y0 + 4, &y1, width, height);
- vp9_blit_line(x0 + 12, x1, y0 + 4, y1, y_buffer, y_stride);
-
- bmi = &mi->bmi[8];
-
- x1 = x0 + 4 + (mv->col >> 3);
- y1 = y0 + 12 + (mv->row >> 3);
-
- constrain_line(x0 + 4, &x1, y0 + 12, &y1, width, height);
- vp9_blit_line(x0 + 4, x1, y0 + 12, y1, y_buffer, y_stride);
-
- bmi = &mi->bmi[10];
-
- x1 = x0 + 12 + (mv->col >> 3);
- y1 = y0 + 12 + (mv->row >> 3);
-
- constrain_line(x0 + 12, &x1, y0 + 12, &y1, width, height);
- vp9_blit_line(x0 + 12, x1, y0 + 12, y1, y_buffer, y_stride);
- break;
- }
- case PARTITIONING_4X4:
- default : {
- union b_mode_info *bmi = mi->bmi;
- int bx0, by0;
-
- for (by0 = y0; by0 < (y0 + 16); by0 += 4) {
- for (bx0 = x0; bx0 < (x0 + 16); bx0 += 4) {
- MV *mv = &bmi->mv.as_mv;
-
- x1 = bx0 + 2 + (mv->col >> 3);
- y1 = by0 + 2 + (mv->row >> 3);
-
- constrain_line(bx0 + 2, &x1, by0 + 2, &y1, width, height);
- vp9_blit_line(bx0 + 2, x1, by0 + 2, y1, y_buffer, y_stride);
-
- bmi++;
- }
- }
- }
- }
- } else if (is_inter_mode(mi->mbmi.mode)) {
- MV *mv = &mi->mbmi.mv.as_mv;
- const int lx0 = x0 + 8;
- const int ly0 = y0 + 8;
-
- x1 = lx0 + (mv->col >> 3);
- y1 = ly0 + (mv->row >> 3);
-
- if (x1 != lx0 && y1 != ly0) {
- constrain_line(lx0, &x1, ly0 - 1, &y1, width, height);
- vp9_blit_line(lx0, x1, ly0 - 1, y1, y_buffer, y_stride);
-
- constrain_line(lx0, &x1, ly0 + 1, &y1, width, height);
- vp9_blit_line(lx0, x1, ly0 + 1, y1, y_buffer, y_stride);
- } else {
- vp9_blit_line(lx0, x1, ly0, y1, y_buffer, y_stride);
- }
- }
-
- mi++;
- }
- mi++;
- }
- }
-
- /* Color in block modes */
- if ((flags & VP9D_DEBUG_CLR_BLK_MODES)
- && (ppflags->display_mb_modes_flag || ppflags->display_b_modes_flag)) {
- int y, x;
- int width = ppbuf->y_width;
- int height = ppbuf->y_height;
- uint8_t *y_ptr = ppbuf->y_buffer;
- uint8_t *u_ptr = ppbuf->u_buffer;
- uint8_t *v_ptr = ppbuf->v_buffer;
- int y_stride = ppbuf->y_stride;
- MODE_INFO *mi = cm->mi;
-
- for (y = 0; y < height; y += 16) {
- for (x = 0; x < width; x += 16) {
- int Y = 0, U = 0, V = 0;
-
- if (mi->mbmi.mode == I4X4_PRED &&
- ((ppflags->display_mb_modes_flag & I4X4_PRED) ||
- ppflags->display_b_modes_flag)) {
- int by, bx;
- uint8_t *yl, *ul, *vl;
- union b_mode_info *bmi = mi->bmi;
-
- yl = y_ptr + x;
- ul = u_ptr + (x >> 1);
- vl = v_ptr + (x >> 1);
-
- for (by = 0; by < 16; by += 4) {
- for (bx = 0; bx < 16; bx += 4) {
- if ((ppflags->display_b_modes_flag & (1 << mi->mbmi.mode))
- || (ppflags->display_mb_modes_flag & I4X4_PRED)) {
- Y = B_PREDICTION_MODE_colors[bmi->as_mode][0];
- U = B_PREDICTION_MODE_colors[bmi->as_mode][1];
- V = B_PREDICTION_MODE_colors[bmi->as_mode][2];
-
- vp9_blend_b(yl + bx, ul + (bx >> 1), vl + (bx >> 1), Y, U, V,
- 0xc000, y_stride);
- }
- bmi++;
- }
-
- yl += y_stride * 4;
- ul += y_stride * 1;
- vl += y_stride * 1;
- }
- } else if (ppflags->display_mb_modes_flag & (1 << mi->mbmi.mode)) {
- Y = PREDICTION_MODE_colors[mi->mbmi.mode][0];
- U = PREDICTION_MODE_colors[mi->mbmi.mode][1];
- V = PREDICTION_MODE_colors[mi->mbmi.mode][2];
-
- vp9_blend_mb_inner(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
- Y, U, V, 0xc000, y_stride);
- }
-
- mi++;
- }
- y_ptr += y_stride * 16;
- u_ptr += y_stride * 4;
- v_ptr += y_stride * 4;
-
- mi++;
- }
- }
-
- /* Color in frame reference blocks */
- if ((flags & VP9D_DEBUG_CLR_FRM_REF_BLKS) &&
- ppflags->display_ref_frame_flag) {
- int y, x;
- int width = ppbuf->y_width;
- int height = ppbuf->y_height;
- uint8_t *y_ptr = ppbuf->y_buffer;
- uint8_t *u_ptr = ppbuf->u_buffer;
- uint8_t *v_ptr = ppbuf->v_buffer;
- int y_stride = ppbuf->y_stride;
- MODE_INFO *mi = cm->mi;
-
- for (y = 0; y < height; y += 16) {
- for (x = 0; x < width; x += 16) {
- int Y = 0, U = 0, V = 0;
-
- if (ppflags->display_ref_frame_flag & (1 << mi->mbmi.ref_frame)) {
- Y = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][0];
- U = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][1];
- V = MV_REFERENCE_FRAME_colors[mi->mbmi.ref_frame][2];
-
- vp9_blend_mb_outer(y_ptr + x, u_ptr + (x >> 1), v_ptr + (x >> 1),
- Y, U, V, 0xc000, y_stride);
- }
-
- mi++;
- }
- y_ptr += y_stride * 16;
- u_ptr += y_stride * 4;
- v_ptr += y_stride * 4;
-
- mi++;
- }
- }
-#endif
-
*dest = *ppbuf;
/* handle problem with extending borders */
return 0;
}
+#endif
int post_proc_flag;
int deblocking_level;
int noise_level;
-#if CONFIG_POSTPROC_VISUALIZER
- int display_ref_frame_flag;
- int display_mb_modes_flag;
- int display_b_modes_flag;
- int display_mv_flag;
-#endif // CONFIG_POSTPROC_VISUALIZER
} vp9_ppflags_t;
#ifdef __cplusplus
}
}
-void vp9_setup_dst_planes(MACROBLOCKD *xd,
+void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
uint8_t *const buffers[4] = {src->y_buffer, src->u_buffer, src->v_buffer,
int i;
for (i = 0; i < MAX_MB_PLANE; ++i) {
- struct macroblockd_plane *const pd = &xd->plane[i];
+ struct macroblockd_plane *const pd = &planes[i];
setup_pred_plane(&pd->dst, buffers[i], strides[i], mi_row, mi_col, NULL,
pd->subsampling_x, pd->subsampling_y);
}
dst->stride = stride;
}
-void vp9_setup_dst_planes(MACROBLOCKD *xd, const YV12_BUFFER_CONFIG *src,
+void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col);
void vp9_setup_pre_planes(MACROBLOCKD *xd, int idx,
return MIN(offset, mis);
}
-void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, int row, int col) {
+void vp9_tile_set_row(TileInfo *tile, const VP9_COMMON *cm, int row) {
tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows);
tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows);
+}
+
+void vp9_tile_set_col(TileInfo *tile, const VP9_COMMON *cm, int col) {
tile->mi_col_start = get_tile_offset(col, cm->mi_cols, cm->log2_tile_cols);
tile->mi_col_end = get_tile_offset(col + 1, cm->mi_cols, cm->log2_tile_cols);
}
+void vp9_tile_init(TileInfo *tile, const VP9_COMMON *cm, int row, int col) {
+ vp9_tile_set_row(tile, cm, row);
+ vp9_tile_set_col(tile, cm, col);
+}
+
void vp9_get_tile_n_bits(int mi_cols,
int *min_log2_tile_cols, int *max_log2_tile_cols) {
const int sb_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
void vp9_tile_init(TileInfo *tile, const struct VP9Common *cm,
int row, int col);
+void vp9_tile_set_row(TileInfo *tile, const struct VP9Common *cm, int row);
+void vp9_tile_set_col(TileInfo *tile, const struct VP9Common *cm, int col);
+
void vp9_get_tile_n_bits(int mi_cols,
int *min_log2_tile_cols, int *max_log2_tile_cols);
// as they are always compared to values that are in 1/8th pel units
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
- vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
return &xd->mi[0]->mbmi;
}
setup_display_size(cm, rb);
}
-static void decode_tile(VP9Decoder *pbi, const TileInfo *const tile,
- int do_loopfilter_inline, vp9_reader *r) {
- const int num_threads = pbi->max_threads;
- VP9_COMMON *const cm = &pbi->common;
- int mi_row, mi_col;
- MACROBLOCKD *xd = &pbi->mb;
-
- if (do_loopfilter_inline) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
- lf_data->frame_buffer = get_frame_new_buffer(cm);
- lf_data->cm = cm;
- lf_data->xd = pbi->mb;
- lf_data->stop = 0;
- lf_data->y_only = 0;
- vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
- }
-
- for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
- mi_row += MI_BLOCK_SIZE) {
- // For a SB there are 2 left contexts, each pertaining to a MB row within
- vp9_zero(xd->left_context);
- vp9_zero(xd->left_seg_context);
- for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
- mi_col += MI_BLOCK_SIZE) {
- decode_partition(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64);
- }
-
- if (do_loopfilter_inline) {
- const int lf_start = mi_row - MI_BLOCK_SIZE;
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
-
- // delay the loopfilter by 1 macroblock row.
- if (lf_start < 0) continue;
-
- // decoding has completed: finish up the loop filter in this thread.
- if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;
-
- vp9_worker_sync(&pbi->lf_worker);
- lf_data->start = lf_start;
- lf_data->stop = mi_row;
- if (num_threads > 1) {
- vp9_worker_launch(&pbi->lf_worker);
- } else {
- vp9_worker_execute(&pbi->lf_worker);
- }
- }
- }
-
- if (do_loopfilter_inline) {
- LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
-
- vp9_worker_sync(&pbi->lf_worker);
- lf_data->start = lf_data->stop;
- lf_data->stop = cm->mi_rows;
- vp9_worker_execute(&pbi->lf_worker);
- }
-}
-
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
int min_log2_tile_cols, max_log2_tile_cols, max_ones;
vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
static const uint8_t *decode_tiles(VP9Decoder *pbi,
const uint8_t *data,
- const uint8_t *data_end,
- int do_loopfilter_inline) {
+ const uint8_t *data_end) {
VP9_COMMON *const cm = &pbi->common;
const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
TileBuffer tile_buffers[4][1 << 6];
int tile_row, tile_col;
- const uint8_t *end = NULL;
- vp9_reader r;
+ int mi_row, mi_col;
+ TileData *tile_data = NULL;
+
+ if (cm->lf.filter_level && pbi->lf_worker.data1 == NULL) {
+ CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
+ vpx_memalign(32, sizeof(LFWorkerData)));
+ pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
+ if (pbi->max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
+ vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
+ "Loop filter thread creation failed");
+ }
+ }
+
+ if (cm->lf.filter_level) {
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ lf_data->frame_buffer = get_frame_new_buffer(cm);
+ lf_data->cm = cm;
+ vp9_copy(lf_data->planes, pbi->mb.plane);
+ lf_data->stop = 0;
+ lf_data->y_only = 0;
+ vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
+ }
assert(tile_rows <= 4);
assert(tile_cols <= (1 << 6));
get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);
- // Decode tiles using data from tile_buffers
+ if (pbi->tile_data == NULL ||
+ (tile_cols * tile_rows) != pbi->total_tiles) {
+ vpx_free(pbi->tile_data);
+ CHECK_MEM_ERROR(
+ cm,
+ pbi->tile_data,
+ vpx_malloc(tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
+ pbi->total_tiles = tile_rows * tile_cols;
+ }
+
+ // Load all tile information into tile_data.
for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
- const int col = pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
- const int last_tile = tile_row == tile_rows - 1 &&
- col == tile_cols - 1;
- const TileBuffer *const buf = &tile_buffers[tile_row][col];
TileInfo tile;
+ const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
+ tile_data = pbi->tile_data + tile_cols * tile_row + tile_col;
+ tile_data->cm = cm;
+ tile_data->xd = pbi->mb;
+ tile_data->xd.corrupted = 0;
+ vp9_tile_init(&tile, tile_data->cm, tile_row, tile_col);
+ setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
+ &tile_data->bit_reader, pbi->decrypt_cb,
+ pbi->decrypt_state);
+ init_macroblockd(cm, &tile_data->xd);
+ vp9_zero(tile_data->xd.dqcoeff);
+ }
+ }
- vp9_tile_init(&tile, cm, tile_row, col);
- setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r,
- pbi->decrypt_cb, pbi->decrypt_state);
- decode_tile(pbi, &tile, do_loopfilter_inline, &r);
-
- if (last_tile)
- end = vp9_reader_find_end(&r);
+ for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
+ TileInfo tile;
+ vp9_tile_set_row(&tile, cm, tile_row);
+ for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
+ mi_row += MI_BLOCK_SIZE) {
+ for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
+ const int col = pbi->inv_tile_order ?
+ tile_cols - tile_col - 1 : tile_col;
+ tile_data = pbi->tile_data + tile_cols * tile_row + col;
+ vp9_tile_set_col(&tile, tile_data->cm, col);
+ vp9_zero(tile_data->xd.left_context);
+ vp9_zero(tile_data->xd.left_seg_context);
+ for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
+ mi_col += MI_BLOCK_SIZE) {
+ decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col,
+ &tile_data->bit_reader, BLOCK_64X64);
+ }
+ }
+ // Loopfilter one row.
+ if (cm->lf.filter_level) {
+ const int lf_start = mi_row - MI_BLOCK_SIZE;
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+
+ // delay the loopfilter by 1 macroblock row.
+ if (lf_start < 0) continue;
+
+ // decoding has completed: finish up the loop filter in this thread.
+ if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;
+
+ vp9_worker_sync(&pbi->lf_worker);
+ lf_data->start = lf_start;
+ lf_data->stop = mi_row;
+ if (pbi->max_threads > 1) {
+ vp9_worker_launch(&pbi->lf_worker);
+ } else {
+ vp9_worker_execute(&pbi->lf_worker);
+ }
+ }
}
}
- return end;
+ // Loopfilter remaining rows in the frame.
+ if (cm->lf.filter_level) {
+ LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
+ vp9_worker_sync(&pbi->lf_worker);
+ lf_data->start = lf_data->stop;
+ lf_data->stop = cm->mi_rows;
+ vp9_worker_execute(&pbi->lf_worker);
+ }
+
+ // Get last tile data.
+ tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
+
+ return vp9_reader_find_end(&tile_data->bit_reader);
}
static int tile_worker_hook(void *arg1, void *arg2) {
const uint8_t *data,
const uint8_t *data_end,
uint8_t *clear_data /* buffer size MAX_VP9_HEADER_SIZE */) {
- vp9_zero(*rb);
rb->bit_offset = 0;
rb->error_handler = error_handler;
rb->error_handler_data = &pbi->common;
const uint8_t **p_data_end) {
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
- struct vp9_read_bit_buffer rb;
+ struct vp9_read_bit_buffer rb = { 0 };
uint8_t clear_data[MAX_VP9_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(pbi,
init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
const int tile_rows = 1 << cm->log2_tile_rows;
const int tile_cols = 1 << cm->log2_tile_cols;
YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
- const int do_loopfilter_inline = tile_rows == 1 && tile_cols == 1 &&
- cm->lf.filter_level;
xd->cur_buf = new_fb;
if (!first_partition_size) {
*p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
// If multiple threads are used to decode tiles, then we use those threads
// to do parallel loopfiltering.
- vp9_loop_filter_frame_mt(new_fb, pbi, cm, cm->lf.filter_level, 0, 0);
+ vp9_loop_filter_frame_mt(new_fb, pbi, cm, cm->lf.filter_level, 0);
} else {
- if (do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
- CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
- vpx_memalign(32, sizeof(LFWorkerData)));
- pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
- if (pbi->max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
- vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
- "Loop filter thread creation failed");
- }
- }
- *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end,
- do_loopfilter_inline);
- if (!do_loopfilter_inline)
- vp9_loop_filter_frame(new_fb, cm, &pbi->mb, cm->lf.filter_level, 0, 0);
+ *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
}
new_fb->corrupted |= xd->corrupted;
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_dthread.h"
-void vp9_initialize_dec() {
+static void initialize_dec(void) {
static int init_done = 0;
if (!init_done) {
}
cm->error.setjmp = 1;
- vp9_initialize_dec();
+ initialize_dec();
vp9_rtcd();
vp9_remove_common(cm);
vp9_worker_end(&pbi->lf_worker);
vpx_free(pbi->lf_worker.data1);
+ vpx_free(pbi->tile_data);
for (i = 0; i < pbi->num_tile_workers; ++i) {
VP9Worker *const worker = &pbi->tile_workers[i];
vp9_worker_end(worker);
int64_t *time_stamp, int64_t *time_end_stamp,
vp9_ppflags_t *flags) {
int ret = -1;
+#if !CONFIG_VP9_POSTPROC
+ (void)flags;
+#endif
if (pbi->ready_for_new_data == 1)
return ret;
- /* ie no raw frame to show!!! */
+ /* no raw frame to show!!! */
if (pbi->common.show_frame == 0)
return ret;
#if CONFIG_VP9_POSTPROC
ret = vp9_post_proc_frame(&pbi->common, sd, flags);
#else
- *sd = *pbi->common.frame_to_show;
- ret = 0;
+ *sd = *pbi->common.frame_to_show;
+ ret = 0;
#endif /*!CONFIG_POSTPROC*/
vp9_clear_system_state();
return ret;
extern "C" {
#endif
+// TODO(hkuang): combine this with TileWorkerData.
+typedef struct TileData {
+ VP9_COMMON *cm;
+ vp9_reader bit_reader;
+ DECLARE_ALIGNED(16, MACROBLOCKD, xd);
+} TileData;
+
typedef struct VP9Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
int decoded_key_frame;
VP9Worker lf_worker;
-
VP9Worker *tile_workers;
int num_tile_workers;
+ TileData *tile_data;
+ int total_tiles;
+
VP9LfSync lf_row_sync;
vpx_decrypt_cb decrypt_cb;
int inv_tile_order;
} VP9Decoder;
-void vp9_initialize_dec();
-
int vp9_receive_compressed_data(struct VP9Decoder *pbi,
size_t size, const uint8_t **dest,
int64_t time_stamp);
// Implement row loopfiltering for each thread.
static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer,
- VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ VP9_COMMON *const cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
int start, int stop, int y_only,
VP9LfSync *const lf_sync, int num_lf_workers) {
const int num_planes = y_only ? 1 : MAX_MB_PLANE;
sync_read(lf_sync, r, c);
- vp9_setup_dst_planes(xd, frame_buffer, mi_row, mi_col);
+ vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
for (plane = 0; plane < num_planes; ++plane) {
- vp9_filter_block_plane(cm, &xd->plane[plane], mi_row, &lfm);
+ vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
}
sync_write(lf_sync, r, c, sb_cols);
TileWorkerData *const tile_data = (TileWorkerData*)arg1;
LFWorkerData *const lf_data = &tile_data->lfdata;
- loop_filter_rows_mt(lf_data->frame_buffer, lf_data->cm, &lf_data->xd,
+ loop_filter_rows_mt(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
lf_data->start, lf_data->stop, lf_data->y_only,
lf_data->lf_sync, lf_data->num_lf_workers);
return 1;
void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
VP9Decoder *pbi, VP9_COMMON *cm,
int frame_filter_level,
- int y_only, int partial_frame) {
+ int y_only) {
VP9LfSync *const lf_sync = &pbi->lf_row_sync;
// Number of superblock rows and cols
const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
// Loopfilter data
lf_data->frame_buffer = frame;
lf_data->cm = cm;
- lf_data->xd = pbi->mb;
+ vp9_copy(lf_data->planes, pbi->mb.plane);
lf_data->start = i;
lf_data->stop = sb_rows;
lf_data->y_only = y_only; // always do all planes in decoder
struct VP9Decoder *pbi,
struct VP9Common *cm,
int frame_filter_level,
- int y_only, int partial_frame);
+ int y_only);
#endif // VP9_DECODER_VP9_DTHREAD_H_
extern "C" {
#endif
-// Structure to hold snapshot of coding context during the mode picking process
-typedef struct {
- MODE_INFO mic;
- uint8_t *zcoeff_blk;
- int16_t *coeff[MAX_MB_PLANE][3];
- int16_t *qcoeff[MAX_MB_PLANE][3];
- int16_t *dqcoeff[MAX_MB_PLANE][3];
- uint16_t *eobs[MAX_MB_PLANE][3];
-
- // dual buffer pointers, 0: in use, 1: best in store
- int16_t *coeff_pbuf[MAX_MB_PLANE][3];
- int16_t *qcoeff_pbuf[MAX_MB_PLANE][3];
- int16_t *dqcoeff_pbuf[MAX_MB_PLANE][3];
- uint16_t *eobs_pbuf[MAX_MB_PLANE][3];
-
- int is_coded;
- int num_4x4_blk;
- int skip;
- int_mv best_ref_mv[2];
- int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
- int rate;
- int distortion;
- int best_mode_index;
- int rddiv;
- int rdmult;
- int hybrid_pred_diff;
- int comp_pred_diff;
- int single_pred_diff;
- int64_t tx_rd_diff[TX_MODES];
- int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
-
- // motion vector cache for adaptive motion search control in partition
- // search loop
- int_mv pred_mv[MAX_REF_FRAMES];
- INTERP_FILTER pred_interp_filter;
-} PICK_MODE_CONTEXT;
-
struct macroblock_plane {
DECLARE_ALIGNED(16, int16_t, src_diff[64 * 64]);
int16_t *qcoeff;
// Zbin Over Quant value
int16_t zbin_extra;
};
-typedef struct PC_TREE {
- int index;
- PARTITION_TYPE partitioning;
- BLOCK_SIZE block_size;
- PICK_MODE_CONTEXT none;
- PICK_MODE_CONTEXT horizontal[2];
- PICK_MODE_CONTEXT vertical[2];
- union {
- struct PC_TREE *split[4];
- PICK_MODE_CONTEXT *leaf_split[4];
- };
-} PC_TREE;
/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
* coefficient in this block was zero) or not. */
// Used to store sub partition's choices.
int_mv pred_mv[MAX_REF_FRAMES];
- PICK_MODE_CONTEXT *leaf_tree;
- PC_TREE *pc_tree;
- PC_TREE *pc_root;
- int partition_cost[PARTITION_CONTEXTS][PARTITION_TYPES];
-
void (*fwd_txm4x4)(const int16_t *input, int16_t *output, int stride);
};
-
#ifdef __cplusplus
} // extern "C"
#endif
*/
#include "vp9/encoder/vp9_context_tree.h"
+#include "vp9/encoder/vp9_encoder.h"
static const BLOCK_SIZE square[] = {
BLOCK_8X8,
// partition level. There are contexts for none, horizontal, vertical, and
// split. Along with a block_size value and a selected block_size which
// represents the state of our search.
-void vp9_setup_pc_tree(VP9_COMMON *cm, MACROBLOCK *x) {
+void vp9_setup_pc_tree(VP9_COMMON *cm, VP9_COMP *cpi) {
int i, j;
const int leaf_nodes = 64;
const int tree_nodes = 64 + 16 + 4 + 1;
int square_index = 1;
int nodes;
- vpx_free(x->leaf_tree);
- CHECK_MEM_ERROR(cm, x->leaf_tree, vpx_calloc(leaf_nodes,
- sizeof(*x->leaf_tree)));
- vpx_free(x->pc_tree);
- CHECK_MEM_ERROR(cm, x->pc_tree, vpx_calloc(tree_nodes, sizeof(*x->pc_tree)));
+ vpx_free(cpi->leaf_tree);
+ CHECK_MEM_ERROR(cm, cpi->leaf_tree, vpx_calloc(leaf_nodes,
+ sizeof(*cpi->leaf_tree)));
+ vpx_free(cpi->pc_tree);
+ CHECK_MEM_ERROR(cm, cpi->pc_tree, vpx_calloc(tree_nodes,
+ sizeof(*cpi->pc_tree)));
- this_pc = &x->pc_tree[0];
- this_leaf = &x->leaf_tree[0];
+ this_pc = &cpi->pc_tree[0];
+ this_leaf = &cpi->leaf_tree[0];
// 4x4 blocks smaller than 8x8 but in the same 8x8 block share the same
// context so we only need to allocate 1 for each 8x8 block.
for (i = 0; i < leaf_nodes; ++i)
- alloc_mode_context(cm, 1, &x->leaf_tree[i]);
+ alloc_mode_context(cm, 1, &cpi->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
for (pc_tree_index = 0; pc_tree_index < leaf_nodes; ++pc_tree_index) {
- PC_TREE *const tree = &x->pc_tree[pc_tree_index];
+ PC_TREE *const tree = &cpi->pc_tree[pc_tree_index];
tree->block_size = square[0];
alloc_tree_contexts(cm, tree, 4);
tree->leaf_split[0] = this_leaf++;
// from leafs to the root.
for (nodes = 16; nodes > 0; nodes >>= 2) {
for (i = 0; i < nodes; ++i) {
- PC_TREE *const tree = &x->pc_tree[pc_tree_index];
+ PC_TREE *const tree = &cpi->pc_tree[pc_tree_index];
alloc_tree_contexts(cm, tree, 4 << (2 * square_index));
tree->block_size = square[square_index];
for (j = 0; j < 4; j++)
}
++square_index;
}
- x->pc_root = &x->pc_tree[tree_nodes - 1];
- x->pc_root[0].none.best_mode_index = 2;
+ cpi->pc_root = &cpi->pc_tree[tree_nodes - 1];
+ cpi->pc_root[0].none.best_mode_index = 2;
}
-void vp9_free_pc_tree(MACROBLOCK *x) {
+// Releases the leaf mode contexts and PC_TREE nodes allocated by
+// vp9_setup_pc_tree(); leaves cpi->pc_tree / cpi->leaf_tree NULL so a
+// repeated free (or a later re-setup) is safe.
+void vp9_free_pc_tree(VP9_COMP *cpi) {
const int tree_nodes = 64 + 16 + 4 + 1;
int i;
// Set up all 4x4 mode contexts
for (i = 0; i < 64; ++i)
- free_mode_context(&x->leaf_tree[i]);
+ free_mode_context(&cpi->leaf_tree[i]);
// Sets up all the leaf nodes in the tree.
for (i = 0; i < tree_nodes; ++i)
- free_tree_contexts(&x->pc_tree[i]);
+ free_tree_contexts(&cpi->pc_tree[i]);
- vpx_free(x->pc_tree);
- x->pc_tree = NULL;
- vpx_free(x->leaf_tree);
- x->leaf_tree = NULL;
+ vpx_free(cpi->pc_tree);
+ cpi->pc_tree = NULL;
+ vpx_free(cpi->leaf_tree);
+ cpi->leaf_tree = NULL;
}
#ifndef VP9_ENCODER_VP9_CONTEXT_TREE_H_
#define VP9_ENCODER_VP9_CONTEXT_TREE_H_
-#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/common/vp9_onyxc_int.h"
-void vp9_setup_pc_tree(VP9_COMMON *cm, MACROBLOCK *x);
-void vp9_free_pc_tree(MACROBLOCK *x);
+struct VP9_COMP;
+
+// Structure to hold snapshot of coding context during the mode picking process
+typedef struct {
+ // Copy of the winning mode's MODE_INFO (restored when the partition choice
+ // that used this context is finally selected).
+ MODE_INFO mic;
+ uint8_t *zcoeff_blk;
+ int16_t *coeff[MAX_MB_PLANE][3];
+ int16_t *qcoeff[MAX_MB_PLANE][3];
+ int16_t *dqcoeff[MAX_MB_PLANE][3];
+ uint16_t *eobs[MAX_MB_PLANE][3];
+
+ // dual buffer pointers, 0: in use, 1: best in store
+ int16_t *coeff_pbuf[MAX_MB_PLANE][3];
+ int16_t *qcoeff_pbuf[MAX_MB_PLANE][3];
+ int16_t *dqcoeff_pbuf[MAX_MB_PLANE][3];
+ uint16_t *eobs_pbuf[MAX_MB_PLANE][3];
+
+ int is_coded;
+ int num_4x4_blk;
+ int skip;
+ // Index of the best mode found for this block; 2 is used as the
+ // "uninitialized" marker by vp9_setup_pc_tree().
+ int best_mode_index;
+ // Rate-distortion cost differences between the reference-mode choices.
+ int hybrid_pred_diff;
+ int comp_pred_diff;
+ int single_pred_diff;
+ int64_t tx_rd_diff[TX_MODES];
+ int64_t best_filter_diff[SWITCHABLE_FILTER_CONTEXTS];
+
+ // motion vector cache for adaptive motion search control in partition
+ // search loop
+ int_mv pred_mv[MAX_REF_FRAMES];
+ // Reset to SWITCHABLE before each SB search when
+ // sf->adaptive_pred_interp_filter is enabled.
+ INTERP_FILTER pred_interp_filter;
+} PICK_MODE_CONTEXT;
+
+// One node of the recursive partition-search tree. Each node carries a mode
+// context per partition type (none / horizontal / vertical) plus either four
+// child PC_TREE nodes or, at the lowest (8x8) level, four leaf mode contexts.
+typedef struct PC_TREE {
+ int index;
+ PARTITION_TYPE partitioning;
+ BLOCK_SIZE block_size;
+ PICK_MODE_CONTEXT none;
+ PICK_MODE_CONTEXT horizontal[2];
+ PICK_MODE_CONTEXT vertical[2];
+ // split is used for interior nodes; leaf_split only for the 8x8-level
+ // nodes (populated from leaf_tree in vp9_setup_pc_tree()).
+ union {
+ struct PC_TREE *split[4];
+ PICK_MODE_CONTEXT *leaf_split[4];
+ };
+} PC_TREE;
+
+void vp9_setup_pc_tree(struct VP9Common *cm, struct VP9_COMP *cpi);
+void vp9_free_pc_tree(struct VP9_COMP *cpi);
#endif /* VP9_ENCODER_VP9_CONTEXT_TREE_H_ */
mbmi = &xd->mi[0]->mbmi;
// Set up destination pointers.
- vp9_setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
+ vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
// Set up limit values for MV components.
// Mv beyond the range do not produce new/different prediction block.
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rate < INT_MAX) {
- none_rate += x->partition_cost[pl][PARTITION_NONE];
+ none_rate += cpi->partition_cost[pl][PARTITION_NONE];
none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rate < INT_MAX) {
- last_part_rate += x->partition_cost[pl][partition];
+ last_part_rate += cpi->partition_cost[pl][partition];
last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
}
pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
split_subsize);
- chosen_rate += x->partition_cost[pl][PARTITION_NONE];
+ chosen_rate += cpi->partition_cost[pl][PARTITION_NONE];
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rate < INT_MAX) {
- chosen_rate += x->partition_cost[pl][PARTITION_SPLIT];
+ chosen_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
}
}
if (this_rate != INT_MAX) {
if (bsize >= BLOCK_8X8) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- this_rate += x->partition_cost[pl][PARTITION_NONE];
+ this_rate += cpi->partition_cost[pl][PARTITION_NONE];
}
sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
if (sum_rd < best_rd) {
if (sum_rd < best_rd && i == 4) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
+ sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
if (sum_rd < best_rd) {
best_rate = sum_rate;
}
if (sum_rd < best_rd) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- sum_rate += x->partition_cost[pl][PARTITION_HORZ];
+ sum_rate += cpi->partition_cost[pl][PARTITION_HORZ];
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
if (sum_rd < best_rd) {
best_rd = sum_rd;
}
if (sum_rd < best_rd) {
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- sum_rate += x->partition_cost[pl][PARTITION_VERT];
+ sum_rate += cpi->partition_cost[pl][PARTITION_VERT];
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
if (sum_rd < best_rd) {
best_rate = sum_rate;
int64_t dummy_dist;
int i;
- MACROBLOCK *x = &cpi->mb;
if (sf->adaptive_pred_interp_filter) {
for (i = 0; i < 64; ++i)
- x->leaf_tree[i].pred_interp_filter = SWITCHABLE;
+ cpi->leaf_tree[i].pred_interp_filter = SWITCHABLE;
for (i = 0; i < 64; ++i) {
- x->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
- x->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
- x->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
- x->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
+ cpi->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
+ cpi->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
+ cpi->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
+ cpi->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
}
}
set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col,
sf->always_this_block_size);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, cpi->pc_root);
} else if (sf->partition_search_type == VAR_BASED_FIXED_PARTITION) {
BLOCK_SIZE bsize;
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
bsize = get_rd_var_based_fixed_partition(cpi, mi_row, mi_col);
set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, cpi->pc_root);
} else if (sf->partition_search_type == VAR_BASED_PARTITION) {
choose_partitioning(cpi, tile, mi_row, mi_col);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, cpi->pc_root);
} else {
if ((cm->current_video_frame
% sf->last_partitioning_redo_frequency) == 0
&sf->max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, INT64_MAX, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, INT64_MAX,
+ cpi->pc_root);
} else {
if (sf->constrain_copy_partition &&
sb_has_motion(cm, prev_mi))
else
copy_partitioning(cm, mi, prev_mi);
rd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, cpi->pc_root);
}
}
} else {
&sf->max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
- &dummy_rate, &dummy_dist, 1, INT64_MAX, x->pc_root);
+ &dummy_rate, &dummy_dist, 1, INT64_MAX, cpi->pc_root);
}
}
}
if (this_rate != INT_MAX) {
int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- this_rate += x->partition_cost[pl][PARTITION_NONE];
+ this_rate += cpi->partition_cost[pl][PARTITION_NONE];
sum_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_dist);
if (sum_rd < best_rd) {
int64_t stop_thresh = 4096;
sum_rd = 0;
if (do_split) {
int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- sum_rate += x->partition_cost[pl][PARTITION_SPLIT];
+ sum_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
subsize = get_subsize(bsize, PARTITION_SPLIT);
for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
const int x_idx = (i & 1) * ms;
sum_rd = INT64_MAX;
} else {
int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- this_rate += x->partition_cost[pl][PARTITION_HORZ];
+ this_rate += cpi->partition_cost[pl][PARTITION_HORZ];
sum_rate += this_rate;
sum_dist += this_dist;
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
sum_rd = INT64_MAX;
} else {
int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
- this_rate += x->partition_cost[pl][PARTITION_VERT];
+ this_rate += cpi->partition_cost[pl][PARTITION_VERT];
sum_rate += this_rate;
sum_dist += this_dist;
sum_rd = RDCOST(x->rdmult, x->rddiv, sum_rate, sum_dist);
case VAR_BASED_PARTITION:
choose_partitioning(cpi, tile, mi_row, mi_col);
nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- 1, &dummy_rate, &dummy_dist, x->pc_root);
+ 1, &dummy_rate, &dummy_dist, cpi->pc_root);
break;
case SOURCE_VAR_BASED_PARTITION:
set_source_var_based_partition(cpi, tile, mi, mi_row, mi_col);
nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- 1, &dummy_rate, &dummy_dist, x->pc_root);
+ 1, &dummy_rate, &dummy_dist, cpi->pc_root);
break;
case VAR_BASED_FIXED_PARTITION:
case FIXED_PARTITION:
get_nonrd_var_based_fixed_partition(cpi, mi_row, mi_col);
set_fixed_partitioning(cpi, tile, mi, mi_row, mi_col, bsize);
nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col, BLOCK_64X64,
- 1, &dummy_rate, &dummy_dist, x->pc_root);
+ 1, &dummy_rate, &dummy_dist, cpi->pc_root);
break;
case REFERENCE_PARTITION:
if (cpi->sf.partition_check ||
&cpi->sf.max_partition_size);
nonrd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX,
- x->pc_root);
+ cpi->pc_root);
} else {
copy_partitioning(cm, mi, prev_mi);
nonrd_use_partition(cpi, tile, mi, tp, mi_row, mi_col,
BLOCK_64X64, 1, &dummy_rate, &dummy_dist,
- x->pc_root);
+ cpi->pc_root);
}
break;
default:
int i;
struct macroblock_plane *const p = x->plane;
struct macroblockd_plane *const pd = xd->plane;
- PICK_MODE_CONTEXT *ctx = &x->pc_root->none;
+ PICK_MODE_CONTEXT *ctx = &cpi->pc_root->none;
for (i = 0; i < MAX_MB_PLANE; ++i) {
p[i].coeff = ctx->coeff_pbuf[i][0];
vpx_free(cpi->tok);
cpi->tok = 0;
- vp9_free_pc_tree(&cpi->mb);
+ vp9_free_pc_tree(cpi);
for (i = 0; i < cpi->svc.number_spatial_layers; ++i) {
LAYER_CONTEXT *const lc = &cpi->svc.layer_context[i];
CHECK_MEM_ERROR(cm, cpi->tok, vpx_calloc(tokens, sizeof(*cpi->tok)));
}
- vp9_setup_pc_tree(&cpi->common, &cpi->mb);
+ vp9_setup_pc_tree(&cpi->common, cpi);
}
static void update_frame_size(VP9_COMP *cpi) {
dst->alpha_buffer};
const int dst_strides[4] = {dst->y_stride, dst->uv_stride, dst->uv_stride,
dst->alpha_stride};
+ const InterpKernel *const kernel = vp9_get_interp_kernel(EIGHTTAP);
int x, y, i;
for (y = 0; y < dst_h; y += 16) {
uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
vp9_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
- vp9_sub_pel_filters_8[x_q4 & 0xf], 16 * src_w / dst_w,
- vp9_sub_pel_filters_8[y_q4 & 0xf], 16 * src_h / dst_h,
+ kernel[x_q4 & 0xf], 16 * src_w / dst_w,
+ kernel[y_q4 & 0xf], 16 * src_h / dst_h,
16 / factor, 16 / factor);
}
}
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
+#include "vp9/encoder/vp9_context_tree.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_firstpass.h"
#include "vp9/encoder/vp9_lookahead.h"
// Default value is 1. From first pass stats, encode_breakout may be disabled.
ENCODE_BREAKOUT_TYPE allow_encode_breakout;
- // Get threshold from external input. In real time mode, it can be
- // overwritten according to encoding speed.
+ // Get threshold from external input. A suggested threshold is 800 for HD
+ // clips, and 300 for < HD clips.
int encode_breakout;
unsigned char *segmentation_map;
uint64_t time_pick_lpf;
uint64_t time_encode_sb_row;
- struct twopass_rc twopass;
+ TWO_PASS twopass;
YV12_BUFFER_CONFIG alt_ref_buffer;
YV12_BUFFER_CONFIG *frames[MAX_LAG_BUFFERS];
int y_mode_costs[INTRA_MODES][INTRA_MODES][INTRA_MODES];
int switchable_interp_costs[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS];
+ PICK_MODE_CONTEXT *leaf_tree;
+ PC_TREE *pc_tree;
+ PC_TREE *pc_root;
+ int partition_cost[PARTITION_CONTEXTS][PARTITION_TYPES];
+
#if CONFIG_MULTIPLE_ARF
// ARF tracking variables.
int multi_arf_enabled;
#define GF_RMAX 96.0
#define ERR_DIVISOR 150.0
#define MIN_DECAY_FACTOR 0.1
+#define SVC_FACTOR_PT_LOW 0.45
+#define FACTOR_PT_LOW 0.5
+#define FACTOR_PT_HIGH 0.9
#define KF_MB_INTRA_MIN 150
#define GF_MB_INTRA_MIN 100
// Resets the first pass file to the given position using a relative seek from
// the current position.
-static void reset_fpf_position(struct twopass_rc *p,
+static void reset_fpf_position(TWO_PASS *p,
const FIRSTPASS_STATS *position) {
p->stats_in = position;
}
-static int lookup_next_frame_stats(const struct twopass_rc *p,
+static int lookup_next_frame_stats(const TWO_PASS *p,
FIRSTPASS_STATS *next_frame) {
if (p->stats_in >= p->stats_in_end)
return EOF;
// Read frame stats at an offset from the current position.
-static int read_frame_stats(const struct twopass_rc *p,
+static int read_frame_stats(const TWO_PASS *p,
FIRSTPASS_STATS *frame_stats, int offset) {
const FIRSTPASS_STATS *fps_ptr = p->stats_in;
return 1;
}
-static int input_stats(struct twopass_rc *p, FIRSTPASS_STATS *fps) {
+static int input_stats(TWO_PASS *p, FIRSTPASS_STATS *fps) {
if (p->stats_in >= p->stats_in_end)
return EOF;
// harder frames.
static double calculate_modified_err(const VP9_COMP *cpi,
const FIRSTPASS_STATS *this_frame) {
- const struct twopass_rc *twopass = &cpi->twopass;
+ const TWO_PASS *twopass = &cpi->twopass;
const SVC *const svc = &cpi->svc;
const FIRSTPASS_STATS *stats;
double av_err;
TileInfo tile;
struct macroblock_plane *const p = x->plane;
struct macroblockd_plane *const pd = xd->plane;
- const PICK_MODE_CONTEXT *ctx = &x->pc_root->none;
+ const PICK_MODE_CONTEXT *ctx = &cpi->pc_root->none;
int i;
int recon_yoffset, recon_uvoffset;
int new_mv_count = 0;
int sum_in_vectors = 0;
uint32_t lastmv_as_int = 0;
- struct twopass_rc *twopass = &cpi->twopass;
+ TWO_PASS *twopass = &cpi->twopass;
const MV zero_mv = {0, 0};
const YV12_BUFFER_CONFIG *first_ref_buf = lst_yv12;
vp9_setup_src_planes(x, cpi->Source, 0, 0);
vp9_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
- vp9_setup_dst_planes(xd, new_yv12, 0, 0);
+ vp9_setup_dst_planes(xd->plane, new_yv12, 0, 0);
xd->mi = cm->mi_grid_visible;
xd->mi[0] = cm->mi;
for (q = rc->best_quality; q < rc->worst_quality; ++q) {
const double factor =
calc_correction_factor(err_per_mb, ERR_DIVISOR,
- is_svc_upper_layer ? 0.8 : 0.5,
- is_svc_upper_layer ? 1.0 : 0.90, q);
+ is_svc_upper_layer ? SVC_FACTOR_PT_LOW :
+ FACTOR_PT_LOW, FACTOR_PT_HIGH, q);
const int bits_per_mb = vp9_rc_bits_per_mb(INTER_FRAME, q,
factor * speed_term);
if (bits_per_mb <= target_norm_bits_per_mb)
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
const int is_spatial_svc = (svc->number_spatial_layers > 1) &&
(svc->number_temporal_layers == 1);
- struct twopass_rc *const twopass = is_spatial_svc ?
+ TWO_PASS *const twopass = is_spatial_svc ?
&svc->layer_context[svc->spatial_layer_id].twopass : &cpi->twopass;
double frame_rate;
FIRSTPASS_STATS *stats;
// This variable monitors how far behind the second ref update is lagging.
twopass->sr_update_lag = 1;
- // Scan the first pass file and calculate an average Intra / Inter error
- // score ratio for the sequence.
- {
- const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
- FIRSTPASS_STATS this_frame;
- double sum_iiratio = 0.0;
-
- while (input_stats(twopass, &this_frame) != EOF) {
- const double iiratio = this_frame.intra_error /
- DOUBLE_DIVIDE_CHECK(this_frame.coded_error);
- sum_iiratio += fclamp(iiratio, 1.0, 20.0);
- }
-
- twopass->avg_iiratio = sum_iiratio /
- DOUBLE_DIVIDE_CHECK((double)stats->count);
-
- reset_fpf_position(twopass, start_pos);
- }
-
// Scan the first pass file and calculate a modified total error based upon
// the bias/power function used to allocate bits.
{
// Function to test for a condition where a complex transition is followed
// by a static section. For example in slide shows where there is a fade
// between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(struct twopass_rc *twopass,
+static int detect_transition_to_still(TWO_PASS *twopass,
int frame_interval, int still_interval,
double loop_decay_rate,
double last_decay_rate) {
// This function detects a flash through the high relative pcnt_second_ref
// score in the frame following a flash frame. The offset passed in should
// reflect this.
-static int detect_flash(const struct twopass_rc *twopass, int offset) {
+static int detect_flash(const TWO_PASS *twopass, int offset) {
FIRSTPASS_STATS next_frame;
int flash_detected = 0;
int f_frames, int b_frames,
int *f_boost, int *b_boost) {
FIRSTPASS_STATS this_frame;
- struct twopass_rc *const twopass = &cpi->twopass;
+ TWO_PASS *const twopass = &cpi->twopass;
int i;
double boost_score = 0.0;
double mv_ratio_accumulator = 0.0;
#endif
// Calculate a section intra ratio used in setting max loop filter.
-static void calculate_section_intra_ratio(struct twopass_rc *twopass,
+static void calculate_section_intra_ratio(TWO_PASS *twopass,
const FIRSTPASS_STATS *start_pos,
int section_length) {
FIRSTPASS_STATS next_frame;
static int64_t calculate_total_gf_group_bits(VP9_COMP *cpi,
double gf_group_err) {
const RATE_CONTROL *const rc = &cpi->rc;
- const struct twopass_rc *const twopass = &cpi->twopass;
+ const TWO_PASS *const twopass = &cpi->twopass;
const int max_bits = frame_max_bits(rc, &cpi->oxcf);
int64_t total_group_bits;
static void define_gf_group(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
RATE_CONTROL *const rc = &cpi->rc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
- struct twopass_rc *const twopass = &cpi->twopass;
+ TWO_PASS *const twopass = &cpi->twopass;
FIRSTPASS_STATS next_frame;
const FIRSTPASS_STATS *start_pos;
int i;
// Allocate bits to a normal frame that is neither a gf an arf or a key frame.
static void assign_std_frame_bits(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
- struct twopass_rc *twopass = &cpi->twopass;
+ TWO_PASS *twopass = &cpi->twopass;
// For a single frame.
const int max_bits = frame_max_bits(&cpi->rc, &cpi->oxcf);
// Calculate modified prediction error used in bit allocation.
vp9_rc_set_frame_target(cpi, target_frame_size);
}
-static int test_candidate_kf(struct twopass_rc *twopass,
+static int test_candidate_kf(TWO_PASS *twopass,
const FIRSTPASS_STATS *last_frame,
const FIRSTPASS_STATS *this_frame,
const FIRSTPASS_STATS *next_frame) {
static void find_next_key_frame(VP9_COMP *cpi, FIRSTPASS_STATS *this_frame) {
int i, j;
RATE_CONTROL *const rc = &cpi->rc;
- struct twopass_rc *const twopass = &cpi->twopass;
+ TWO_PASS *const twopass = &cpi->twopass;
const FIRSTPASS_STATS first_frame = *this_frame;
const FIRSTPASS_STATS *start_position = twopass->stats_in;
FIRSTPASS_STATS next_frame;
void vp9_rc_get_second_pass_params(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
RATE_CONTROL *const rc = &cpi->rc;
- struct twopass_rc *const twopass = &cpi->twopass;
+ TWO_PASS *const twopass = &cpi->twopass;
int frames_left;
FIRSTPASS_STATS this_frame;
FIRSTPASS_STATS this_frame_copy;
- double this_frame_intra_error;
- double this_frame_coded_error;
int target;
LAYER_CONTEXT *lc = NULL;
- int is_spatial_svc = (cpi->use_svc && cpi->svc.number_temporal_layers == 1);
-
+ const int is_spatial_svc = (cpi->use_svc &&
+ cpi->svc.number_temporal_layers == 1);
if (is_spatial_svc) {
lc = &cpi->svc.layer_context[cpi->svc.spatial_layer_id];
frames_left = (int)(twopass->total_stats.count -
if (EOF == input_stats(twopass, &this_frame))
return;
- this_frame_intra_error = this_frame.intra_error;
- this_frame_coded_error = this_frame.coded_error;
-
// Keyframe and section processing.
if (rc->frames_to_key == 0 ||
(cpi->frame_flags & FRAMEFLAGS_KEY)) {
// Define next KF group and assign bits to it.
this_frame_copy = this_frame;
find_next_key_frame(cpi, &this_frame_copy);
- // Don't place key frame in any enhancement layers in spatial svc
- if (cpi->use_svc && cpi->svc.number_temporal_layers == 1) {
- lc->is_key_frame = 1;
- if (cpi->svc.spatial_layer_id > 0) {
- cm->frame_type = INTER_FRAME;
- }
- }
} else {
- if (cpi->use_svc && cpi->svc.number_temporal_layers == 1) {
- lc->is_key_frame = 0;
- }
cm->frame_type = INTER_FRAME;
}
+ if (is_spatial_svc) {
+ if (cpi->svc.spatial_layer_id == 0) {
+ lc->is_key_frame = (cm->frame_type == KEY_FRAME);
+ } else {
+ cm->frame_type = INTER_FRAME;
+ lc->is_key_frame = cpi->svc.layer_context[0].is_key_frame;
+
+ if (lc->is_key_frame) {
+ cpi->ref_frame_flags &= (~VP9_LAST_FLAG);
+ }
+ }
+ }
+
// Is this frame a GF / ARF? (Note: a key frame is always also a GF).
if (rc->frames_till_gf_update_due == 0) {
// Define next gf group and assign bits to it.
assign_std_frame_bits(cpi, &this_frame_copy);
}
- // Keep a globally available copy of this and the next frame's iiratio.
- twopass->this_iiratio = (int)(this_frame_intra_error /
- DOUBLE_DIVIDE_CHECK(this_frame_coded_error));
{
FIRSTPASS_STATS next_frame;
if (lookup_next_frame_stats(twopass, &next_frame) != EOF) {
int64_t spatial_layer_id;
} FIRSTPASS_STATS;
-struct twopass_rc {
+typedef struct {
unsigned int section_intra_rating;
unsigned int next_iiratio;
- unsigned int this_iiratio;
FIRSTPASS_STATS total_stats;
FIRSTPASS_STATS this_frame_stats;
const FIRSTPASS_STATS *stats_in;
FIRSTPASS_STATS total_left_stats;
int first_pass_done;
int64_t bits_left;
- int64_t clip_bits_total;
- double avg_iiratio;
double modified_error_min;
double modified_error_max;
double modified_error_total;
int64_t gf_group_bits;
// Bits for the golden frame or ARF - 2 pass only
int gf_bits;
- int alt_extra_bits;
int sr_update_lag;
int gf_zeromotion_pct;
int active_worst_quality;
-};
+} TWO_PASS;
struct VP9_COMP;
int mb_col, mb_row, offset = 0;
int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
- MV arf_top_mv = {0, 0}, gld_top_mv = {0, 0};
+ MV gld_top_mv = {0, 0};
MODE_INFO mi_local;
vp9_zero(mi_local);
mi_local.mbmi.ref_frame[1] = NONE;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
- MV arf_left_mv = arf_top_mv, gld_left_mv = gld_top_mv;
+ MV gld_left_mv = gld_top_mv;
int mb_y_in_offset = mb_y_offset;
int arf_y_in_offset = arf_y_offset;
int gld_y_in_offset = gld_y_offset;
update_mbgraph_mb_stats(cpi, mb_stats, buf, mb_y_in_offset,
golden_ref, &gld_left_mv, alt_ref,
mb_row, mb_col);
- arf_left_mv = mb_stats->ref[ALTREF_FRAME].m.mv.as_mv;
gld_left_mv = mb_stats->ref[GOLDEN_FRAME].m.mv.as_mv;
if (mb_col == 0) {
- arf_top_mv = arf_left_mv;
gld_top_mv = gld_left_mv;
}
xd->left_available = 1;
static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE bsize,
MACROBLOCK *x, MACROBLOCKD *xd,
- int *out_rate_sum, int64_t *out_dist_sum) {
+ int *out_rate_sum, int64_t *out_dist_sum,
+ unsigned int *var_y, unsigned int *sse_y) {
// Note our transform coeffs are 8 times an orthogonal transform.
// Hence quantizer step is also 8 times. To get effective quantizer
// we need to divide by 8 before sending to modeling function.
unsigned int var = cpi->fn_ptr[bsize].vf(p->src.buf, p->src.stride,
pd->dst.buf, pd->dst.stride, &sse);
+ *var_y = var;
+ *sse_y = sse;
+
// TODO(jingning) This is a temporary solution to account for frames with
// light changes. Need to customize the rate-distortion modeling for non-RD
// mode decision.
int rate = INT_MAX;
int64_t dist = INT64_MAX;
+ // var_y and sse_y are saved to be used in skipping checking
+ unsigned int var_y = UINT_MAX;
+ unsigned int sse_y = UINT_MAX;
VP9_COMMON *cm = &cpi->common;
int intra_cost_penalty = 20 * vp9_dc_quant(cm->base_qindex, cm->y_dc_delta_q);
x->skip_encode = cpi->sf.skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
x->skip = 0;
- if (!x->in_active_map)
- x->skip = 1;
+
// initialize mode decisions
*returnrate = INT_MAX;
*returndistortion = INT64_MAX;
pred_filter_search &&
((mbmi->mv[0].as_mv.row & 0x07) != 0 ||
(mbmi->mv[0].as_mv.col & 0x07) != 0)) {
- int64_t tmp_rdcost1 = INT64_MAX;
- int64_t tmp_rdcost2 = INT64_MAX;
- int64_t tmp_rdcost3 = INT64_MAX;
int pf_rate[3];
int64_t pf_dist[3];
-
- mbmi->interp_filter = EIGHTTAP;
- vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[EIGHTTAP],
- &pf_dist[EIGHTTAP]);
- tmp_rdcost1 = RDCOST(x->rdmult, x->rddiv,
- vp9_get_switchable_rate(cpi) + pf_rate[EIGHTTAP],
- pf_dist[EIGHTTAP]);
-
- mbmi->interp_filter = EIGHTTAP_SHARP;
- vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[EIGHTTAP_SHARP],
- &pf_dist[EIGHTTAP_SHARP]);
- tmp_rdcost2 = RDCOST(x->rdmult, x->rddiv, vp9_get_switchable_rate(cpi) +
- pf_rate[EIGHTTAP_SHARP],
- pf_dist[EIGHTTAP_SHARP]);
-
- mbmi->interp_filter = EIGHTTAP_SMOOTH;
- vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[EIGHTTAP_SMOOTH],
- &pf_dist[EIGHTTAP_SMOOTH]);
- tmp_rdcost3 = RDCOST(x->rdmult, x->rddiv, vp9_get_switchable_rate(cpi) +
- pf_rate[EIGHTTAP_SMOOTH],
- pf_dist[EIGHTTAP_SMOOTH]);
-
- if (tmp_rdcost2 < tmp_rdcost1) {
- if (tmp_rdcost2 < tmp_rdcost3)
- mbmi->interp_filter = EIGHTTAP_SHARP;
- else
- mbmi->interp_filter = EIGHTTAP_SMOOTH;
- } else {
- if (tmp_rdcost1 < tmp_rdcost3)
- mbmi->interp_filter = EIGHTTAP;
- else
- mbmi->interp_filter = EIGHTTAP_SMOOTH;
+ unsigned int pf_var[3];
+ unsigned int pf_sse[3];
+ int64_t best_cost = INT64_MAX;
+ INTERP_FILTER best_filter = SWITCHABLE, filter;
+
+ for (filter = EIGHTTAP; filter <= EIGHTTAP_SHARP; ++filter) {
+ int64_t cost;
+ mbmi->interp_filter = filter;
+ vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
+ model_rd_for_sb_y(cpi, bsize, x, xd, &pf_rate[filter],
+ &pf_dist[filter], &pf_var[filter], &pf_sse[filter]);
+ cost = RDCOST(x->rdmult, x->rddiv,
+ vp9_get_switchable_rate(cpi) + pf_rate[filter],
+ pf_dist[filter]);
+ if (cost < best_cost) {
+ best_filter = filter;
+ best_cost = cost;
+ }
}
+ mbmi->interp_filter = best_filter;
rate = pf_rate[mbmi->interp_filter];
dist = pf_dist[mbmi->interp_filter];
+ var_y = pf_var[mbmi->interp_filter];
+ sse_y = pf_sse[mbmi->interp_filter];
} else {
mbmi->interp_filter = (filter_ref == SWITCHABLE) ? EIGHTTAP: filter_ref;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, bsize);
- model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist);
+ model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
}
rate += rate_mv;
[INTER_OFFSET(this_mode)];
this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
- if (this_rd < best_rd) {
+ // Skipping checking: test to see if this block can be reconstructed by
+ // prediction only.
+ if (!x->in_active_map) {
+ x->skip = 1;
+ } else if (cpi->allow_encode_breakout && x->encode_breakout) {
+ const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
+ unsigned int var = var_y, sse = sse_y;
+ // Skipping threshold for ac.
+ unsigned int thresh_ac;
+ // Skipping threshold for dc.
+ unsigned int thresh_dc;
+ // Set a maximum for threshold to avoid big PSNR loss in low bit rate
+ // case. Use extreme low threshold for static frames to limit skipping.
+ const unsigned int max_thresh = 36000;
+ // The encode_breakout input
+ const unsigned int min_thresh =
+ MIN(((unsigned int)x->encode_breakout << 4), max_thresh);
+
+ // Calculate threshold according to dequant value.
+ thresh_ac = (xd->plane[0].dequant[1] * xd->plane[0].dequant[1]) / 9;
+ thresh_ac = clamp(thresh_ac, min_thresh, max_thresh);
+
+ // Adjust ac threshold according to partition size.
+ thresh_ac >>= 8 - (b_width_log2_lookup[bsize] +
+ b_height_log2_lookup[bsize]);
+
+ thresh_dc = (xd->plane[0].dequant[0] * xd->plane[0].dequant[0] >> 6);
+
+ // Y skipping condition checking for ac and dc.
+ if (var <= thresh_ac && (sse - var) <= thresh_dc) {
+ unsigned int sse_u, sse_v;
+ unsigned int var_u, var_v;
+
+ // Skip u v prediction for less calculation, that won't affect
+ // result much.
+ var_u = cpi->fn_ptr[uv_size].vf(x->plane[1].src.buf,
+ x->plane[1].src.stride,
+ xd->plane[1].dst.buf,
+ xd->plane[1].dst.stride, &sse_u);
+
+ // U skipping condition checking
+ if ((var_u * 4 <= thresh_ac) && (sse_u - var_u <= thresh_dc)) {
+ var_v = cpi->fn_ptr[uv_size].vf(x->plane[2].src.buf,
+ x->plane[2].src.stride,
+ xd->plane[2].dst.buf,
+ xd->plane[2].dst.stride, &sse_v);
+
+ // V skipping condition checking
+ if ((var_v * 4 <= thresh_ac) && (sse_v - var_v <= thresh_dc)) {
+ x->skip = 1;
+
+ // The cost of skip bit needs to be added.
+ rate = rate_mv;
+ rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+ [INTER_OFFSET(this_mode)];
+
+ // More on this part of rate
+ // rate += vp9_cost_bit(vp9_get_skip_prob(cm, xd), 1);
+
+ // Scaling factor for SSE from spatial domain to frequency
+ // domain is 16. Adjust distortion accordingly.
+ // TODO(yunqingwang): In this function, only y-plane dist is
+ // calculated.
+ dist = (sse << 4); // + ((sse_u + sse_v) << 4);
+ this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
+ // *disable_skip = 1;
+ }
+ }
+ }
+ }
+
+ if (this_rd < best_rd || x->skip) {
best_rd = this_rd;
*returnrate = rate;
*returndistortion = dist;
best_pred_filter = mbmi->interp_filter;
best_ref_frame = ref_frame;
}
+
+ if (x->skip)
+ break;
}
}
// Perform intra prediction search, if the best SAD is above a certain
// threshold.
- if (best_rd > inter_mode_thresh && bsize < cpi->sf.max_intra_bsize) {
+ if (!x->skip && best_rd > inter_mode_thresh &&
+ bsize < cpi->sf.max_intra_bsize) {
for (this_mode = DC_PRED; this_mode <= DC_PRED; ++this_mode) {
vp9_predict_intra_block(xd, 0, b_width_log2(bsize),
mbmi->tx_size, this_mode,
&p->src.buf[0], p->src.stride,
&pd->dst.buf[0], pd->dst.stride, 0, 0, 0);
- model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist);
+ model_rd_for_sb_y(cpi, bsize, x, xd, &rate, &dist, &var_y, &sse_y);
rate += cpi->mbmode_cost[this_mode];
rate += intra_cost_penalty;
this_rd = RDCOST(x->rdmult, x->rddiv, rate, dist);
cpi->mb.sadperbit4 = sad_per_bit4lut[qindex];
}
+// Swaps the coefficient buffer pointers (coeff/qcoeff/dqcoeff/eobs) between
+// slots m and n of the pick-mode context for planes [min_plane, max_plane),
+// leaving the macroblock planes pointing at slot n's previous buffers.
+// Used to juggle the "in use" and "best in store" dual buffers without
+// copying coefficient data.
+static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
+ int m, int n, int min_plane, int max_plane) {
+ int i;
+
+ for (i = min_plane; i < max_plane; ++i) {
+ struct macroblock_plane *const p = &x->plane[i];
+ struct macroblockd_plane *const pd = &x->e_mbd.plane[i];
+
+ // Point the working planes at slot m's buffers.
+ p->coeff = ctx->coeff_pbuf[i][m];
+ p->qcoeff = ctx->qcoeff_pbuf[i][m];
+ pd->dqcoeff = ctx->dqcoeff_pbuf[i][m];
+ p->eobs = ctx->eobs_pbuf[i][m];
+
+ // Slot m takes over slot n's buffers ...
+ ctx->coeff_pbuf[i][m] = ctx->coeff_pbuf[i][n];
+ ctx->qcoeff_pbuf[i][m] = ctx->qcoeff_pbuf[i][n];
+ ctx->dqcoeff_pbuf[i][m] = ctx->dqcoeff_pbuf[i][n];
+ ctx->eobs_pbuf[i][m] = ctx->eobs_pbuf[i][n];
+
+ // ... and slot n records what the planes now reference (old slot m).
+ ctx->coeff_pbuf[i][n] = p->coeff;
+ ctx->qcoeff_pbuf[i][n] = p->qcoeff;
+ ctx->dqcoeff_pbuf[i][n] = pd->dqcoeff;
+ ctx->eobs_pbuf[i][n] = p->eobs;
+ }
+}
+
static void set_block_thresholds(const VP9_COMMON *cm, RD_OPT *rd) {
int i, bsize, segment_id;
fill_token_costs(x->token_costs, cm->fc.coef_probs);
for (i = 0; i < PARTITION_CONTEXTS; i++)
- vp9_cost_tokens(x->partition_cost[i], get_partition_probs(cm, i),
+ vp9_cost_tokens(cpi->partition_cost[i], get_partition_probs(cm, i),
vp9_partition_tree);
}
*rate_tokenonly = this_rate_tokenonly;
*distortion = this_distortion;
*skippable = s;
- if (!x->select_txfm_size) {
- int i;
- struct macroblock_plane *const p = x->plane;
- struct macroblockd_plane *const pd = xd->plane;
- for (i = 1; i < MAX_MB_PLANE; ++i) {
- p[i].coeff = ctx->coeff_pbuf[i][2];
- p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
- pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
- p[i].eobs = ctx->eobs_pbuf[i][2];
-
- ctx->coeff_pbuf[i][2] = ctx->coeff_pbuf[i][0];
- ctx->qcoeff_pbuf[i][2] = ctx->qcoeff_pbuf[i][0];
- ctx->dqcoeff_pbuf[i][2] = ctx->dqcoeff_pbuf[i][0];
- ctx->eobs_pbuf[i][2] = ctx->eobs_pbuf[i][0];
-
- ctx->coeff_pbuf[i][0] = p[i].coeff;
- ctx->qcoeff_pbuf[i][0] = p[i].qcoeff;
- ctx->dqcoeff_pbuf[i][0] = pd[i].dqcoeff;
- ctx->eobs_pbuf[i][0] = p[i].eobs;
- }
- }
+ if (!x->select_txfm_size)
+ swap_block_ptr(x, ctx, 2, 0, 1, MAX_MB_PLANE);
}
}
ctx->skip = x->skip;
ctx->best_mode_index = mode_index;
ctx->mic = *xd->mi[0];
-
- ctx->best_ref_mv[0].as_int = ref_mv->as_int;
- ctx->best_ref_mv[1].as_int = second_ref_mv->as_int;
-
ctx->single_pred_diff = (int)comp_pred_diff[SINGLE_REFERENCE];
ctx->comp_pred_diff = (int)comp_pred_diff[COMPOUND_REFERENCE];
ctx->hybrid_pred_diff = (int)comp_pred_diff[REFERENCE_MODE_SELECT];
return this_rd; // if 0, this will be re-calculated by caller
}
-static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
- int max_plane) {
- struct macroblock_plane *const p = x->plane;
- struct macroblockd_plane *const pd = x->e_mbd.plane;
- int i;
-
- for (i = 0; i < max_plane; ++i) {
- p[i].coeff = ctx->coeff_pbuf[i][1];
- p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
- pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
- p[i].eobs = ctx->eobs_pbuf[i][1];
-
- ctx->coeff_pbuf[i][1] = ctx->coeff_pbuf[i][0];
- ctx->qcoeff_pbuf[i][1] = ctx->qcoeff_pbuf[i][0];
- ctx->dqcoeff_pbuf[i][1] = ctx->dqcoeff_pbuf[i][0];
- ctx->eobs_pbuf[i][1] = ctx->eobs_pbuf[i][0];
-
- ctx->coeff_pbuf[i][0] = p[i].coeff;
- ctx->qcoeff_pbuf[i][0] = p[i].qcoeff;
- ctx->dqcoeff_pbuf[i][0] = pd[i].dqcoeff;
- ctx->eobs_pbuf[i][0] = p[i].eobs;
- }
-}
-
void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
int *returnrate, int64_t *returndist,
BLOCK_SIZE bsize,
best_mbmode = *mbmi;
best_skip2 = this_skip2;
if (!x->select_txfm_size)
- swap_block_ptr(x, ctx, max_plane);
+ swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[mbmi->tx_size],
sizeof(uint8_t) * ctx->num_4x4_blk);
best_mbmode = *mbmi;
best_skip2 = this_skip2;
if (!x->select_txfm_size)
- swap_block_ptr(x, ctx, max_plane);
+ swap_block_ptr(x, ctx, 1, 0, 0, max_plane);
vpx_memcpy(ctx->zcoeff_blk, x->zcoeff_blk[TX_4X4],
sizeof(uint8_t) * ctx->num_4x4_blk);
int speed) {
sf->static_segmentation = 0;
sf->adaptive_rd_thresh = 1;
- sf->encode_breakout_thresh = 1;
sf->use_fast_coef_costing = 1;
if (speed == 1) {
sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_H_V;
sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V;
sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V;
- sf->encode_breakout_thresh = 8;
}
if (speed >= 2) {
sf->intra_y_mode_mask[TX_16X16] = INTRA_DC_H_V;
sf->intra_uv_mode_mask[TX_32X32] = INTRA_DC_H_V;
sf->intra_uv_mode_mask[TX_16X16] = INTRA_DC_H_V;
- sf->encode_breakout_thresh = 200;
}
if (speed >= 3) {
sf->optimize_coefficients = 0;
sf->disable_split_mask = DISABLE_ALL_SPLIT;
sf->lpf_pick = LPF_PICK_FROM_Q;
- sf->encode_breakout_thresh = 700;
}
if (speed >= 4) {
}
sf->intra_y_mode_mask[TX_32X32] = INTRA_DC_ONLY;
sf->frame_parameter_update = 0;
- sf->encode_breakout_thresh = 1000;
sf->search_method = FAST_HEX;
sf->disable_inter_mode_mask[BLOCK_32X32] = 1 << INTER_OFFSET(ZEROMV);
sf->disable_inter_mode_mask[BLOCK_32X64] = ~(1 << INTER_OFFSET(NEARESTMV));
sf->use_fast_coef_costing = 0;
sf->mode_skip_start = MAX_MODES; // Mode index at which mode skip mask set
sf->use_nonrd_pick_mode = 0;
- sf->encode_breakout_thresh = 0;
for (i = 0; i < BLOCK_SIZES; ++i)
sf->disable_inter_mode_mask[i] = 0;
sf->max_intra_bsize = BLOCK_64X64;
cpi->mb.optimize = sf->optimize_coefficients == 1 && cpi->pass != 1;
- if (cpi->encode_breakout && oxcf->mode == REALTIME &&
- sf->encode_breakout_thresh > cpi->encode_breakout)
- cpi->encode_breakout = sf->encode_breakout_thresh;
-
if (sf->disable_split_mask == DISABLE_ALL_SPLIT)
sf->adaptive_pred_interp_filter = 0;
// This flag controls the use of non-RD mode decision.
int use_nonrd_pick_mode;
- // This variable sets the encode_breakout threshold. Currently, it is only
- // enabled in real time mode.
- int encode_breakout_thresh;
-
// A binary mask indicating if NEARESTMV, NEARMV, ZEROMV, NEWMV
// modes are disabled in order from LSB to MSB for each BLOCK_SIZE.
int disable_inter_mode_mask[BLOCK_SIZES];
int i;
for (i = 0; i < svc->number_spatial_layers; ++i) {
- struct twopass_rc *const twopass = &svc->layer_context[i].twopass;
+ TWO_PASS *const twopass = &svc->layer_context[i].twopass;
svc->spatial_layer_id = i;
vp9_init_second_pass(cpi);
int64_t maximum_buffer_size;
double framerate;
int avg_frame_size;
- struct twopass_rc twopass;
+ TWO_PASS twopass;
struct vpx_fixed_buf rc_twopass_stats_in;
unsigned int current_video_frame_in_layer;
int is_key_frame;
MACROBLOCK *x;
int *skippable;
};
-
static void is_skippable(int plane, int block,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
void *argv) {
args->skippable[0] &= (!args->x->plane[plane].eobs[block]);
}
+// TODO(yaowu): rewrite and optimize this function to remove the usage of
+// vp9_foreach_transform_block() and simplify is_skippable().
int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
int result = 1;
struct is_skippable_args args = {x, &result};
ERROR("Option --tune=ssim is not currently supported in VP9.");
if (cfg->g_pass == VPX_RC_LAST_PASS) {
- size_t packet_sz = sizeof(FIRSTPASS_STATS);
- int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
+ const size_t packet_sz = sizeof(FIRSTPASS_STATS);
+ const int n_packets = (int)(cfg->rc_twopass_stats_in.sz / packet_sz);
const FIRSTPASS_STATS *stats;
if (cfg->rc_twopass_stats_in.buf == NULL)
static vpx_codec_err_t ctrl_get_param(vpx_codec_alg_priv_t *ctx, int ctrl_id,
va_list args) {
- void *arg = va_arg(args, void *);
+ void *const arg = va_arg(args, void *);
#define MAP(id, var) case id: *(RECAST(id, arg)) = var; break
static vpx_codec_err_t encoder_init(vpx_codec_ctx_t *ctx,
vpx_codec_priv_enc_mr_cfg_t *data) {
vpx_codec_err_t res = VPX_CODEC_OK;
+ (void)data;
if (ctx->priv == NULL) {
int i;
return res;
}
-static const vpx_codec_cx_pkt_t *encoder_get_cxdata(vpx_codec_alg_priv_t *ctx,
+static const vpx_codec_cx_pkt_t *encoder_get_cxdata(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
return vpx_codec_pkt_list_get(&ctx->pkt_list.head, iter);
}
static vpx_codec_err_t ctrl_set_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+ (void)ctrl_id;
if (frame != NULL) {
YV12_BUFFER_CONFIG sd;
}
static vpx_codec_err_t ctrl_copy_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
vpx_ref_frame_t *const frame = va_arg(args, vpx_ref_frame_t *);
+ (void)ctrl_id;
if (frame != NULL) {
YV12_BUFFER_CONFIG sd;
}
static vpx_codec_err_t ctrl_get_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
- vp9_ref_frame_t *frame = va_arg(args, vp9_ref_frame_t *);
+ int ctrl_id, va_list args) {
+ vp9_ref_frame_t *const frame = va_arg(args, vp9_ref_frame_t *);
+ (void)ctrl_id;
if (frame != NULL) {
- YV12_BUFFER_CONFIG* fb;
+ YV12_BUFFER_CONFIG *fb;
vp9_get_reference_enc(ctx->cpi, frame->idx, &fb);
yuvconfig2image(&frame->img, fb, NULL);
static vpx_image_t *encoder_get_preview(vpx_codec_alg_priv_t *ctx) {
YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0};
+ vp9_ppflags_t flags;
+ vp9_zero(flags);
if (ctx->preview_ppcfg.post_proc_flag) {
flags.post_proc_flag = ctx->preview_ppcfg.post_proc_flag;
}
static vpx_codec_err_t ctrl_update_entropy(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
const int update = va_arg(args, int);
+ (void)ctrl_id;
+
vp9_update_entropy(ctx->cpi, update);
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_update_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
const int ref_frame_flags = va_arg(args, int);
+ (void)ctrl_id;
+
vp9_update_reference(ctx->cpi, ref_frame_flags);
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_use_reference(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
const int reference_flag = va_arg(args, int);
+ (void)ctrl_id;
+
vp9_use_as_reference(ctx->cpi, reference_flag);
return VPX_CODEC_OK;
}
static vpx_codec_err_t ctrl_set_roi_map(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
+ (void)ctx;
+ (void)ctrl_id;
+ (void)args;
+
// TODO(yaowu): Need to re-implement and test for VP9.
return VPX_CODEC_INVALID_PARAM;
}
static vpx_codec_err_t ctrl_set_active_map(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
vpx_active_map_t *const map = va_arg(args, vpx_active_map_t *);
+ (void)ctrl_id;
if (map) {
if (!vp9_set_active_map(ctx->cpi, map->active_map,
}
static vpx_codec_err_t ctrl_set_scale_mode(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
vpx_scaling_mode_t *const mode = va_arg(args, vpx_scaling_mode_t *);
+ (void)ctrl_id;
if (mode) {
const int res = vp9_set_internal_size(ctx->cpi,
}
}
-static vpx_codec_err_t ctrl_set_svc(vpx_codec_alg_priv_t *ctx, int ctr_id,
+static vpx_codec_err_t ctrl_set_svc(vpx_codec_alg_priv_t *ctx, int ctrl_id,
va_list args) {
int data = va_arg(args, int);
const vpx_codec_enc_cfg_t *cfg = &ctx->cfg;
+ (void)ctrl_id;
+
vp9_set_svc(ctx->cpi, data);
// CBR or two pass mode for SVC with both temporal and spatial layers
// not yet supported.
}
static vpx_codec_err_t ctrl_set_svc_layer_id(vpx_codec_alg_priv_t *ctx,
- int ctr_id,
- va_list args) {
+ int ctrl_id, va_list args) {
vpx_svc_layer_id_t *const data = va_arg(args, vpx_svc_layer_id_t *);
VP9_COMP *const cpi = (VP9_COMP *)ctx->cpi;
SVC *const svc = &cpi->svc;
+ (void)ctrl_id;
+
svc->spatial_layer_id = data->spatial_layer_id;
svc->temporal_layer_id = data->temporal_layer_id;
// Checks on valid layer_id input.
}
static vpx_codec_err_t ctrl_set_svc_parameters(vpx_codec_alg_priv_t *ctx,
- int ctr_id, va_list args) {
+ int ctrl_id, va_list args) {
VP9_COMP *const cpi = ctx->cpi;
vpx_svc_parameters_t *const params = va_arg(args, vpx_svc_parameters_t *);
+ (void)ctrl_id;
if (params == NULL)
return VPX_CODEC_INVALID_PARAM;
NOT_IMPLEMENTED, // vpx_codec_get_si_fn_t
NOT_IMPLEMENTED, // vpx_codec_decode_fn_t
NOT_IMPLEMENTED, // vpx_codec_frame_get_fn_t
+ NOT_IMPLEMENTED // vpx_codec_set_fb_fn_t
},
{ // NOLINT
encoder_usage_cfg_map, // vpx_codec_enc_cfg_map_t
encoder_set_config, // vpx_codec_enc_config_set_fn_t
NOT_IMPLEMENTED, // vpx_codec_get_global_headers_fn_t
encoder_get_preview, // vpx_codec_get_preview_frame_fn_t
- NOT_IMPLEMENTED , // vpx_codec_enc_mr_get_mem_loc_fn_t
+ NOT_IMPLEMENTED // vpx_codec_enc_mr_get_mem_loc_fn_t
}
};
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
- int decoder_init;
struct VP9Decoder *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
-#if CONFIG_POSTPROC_VISUALIZER
- unsigned int dbg_postproc_flag;
- int dbg_color_ref_frame_flag;
- int dbg_color_mb_modes_flag;
- int dbg_color_b_modes_flag;
- int dbg_display_mv_flag;
-#endif
vpx_decrypt_cb decrypt_cb;
void *decrypt_state;
vpx_image_t img;
static void set_ppflags(const vpx_codec_alg_priv_t *ctx,
vp9_ppflags_t *flags) {
flags->post_proc_flag =
-#if CONFIG_POSTPROC_VISUALIZER
- (ctx->dbg_color_ref_frame_flag ? VP9D_DEBUG_CLR_FRM_REF_BLKS : 0) |
- (ctx->dbg_color_mb_modes_flag ? VP9D_DEBUG_CLR_BLK_MODES : 0) |
- (ctx->dbg_color_b_modes_flag ? VP9D_DEBUG_CLR_BLK_MODES : 0) |
- (ctx->dbg_display_mv_flag ? VP9D_DEBUG_DRAW_MV : 0) |
-#endif
ctx->postproc_cfg.post_proc_flag;
flags->deblocking_level = ctx->postproc_cfg.deblocking_level;
flags->noise_level = ctx->postproc_cfg.noise_level;
-#if CONFIG_POSTPROC_VISUALIZER
- flags->display_ref_frame_flag = ctx->dbg_color_ref_frame_flag;
- flags->display_mb_modes_flag = ctx->dbg_color_mb_modes_flag;
- flags->display_b_modes_flag = ctx->dbg_color_b_modes_flag;
- flags->display_mv_flag = ctx->dbg_display_mv_flag;
-#endif
}
static void init_decoder(vpx_codec_alg_priv_t *ctx) {
ctx->pbi->max_threads = ctx->cfg.threads;
ctx->pbi->inv_tile_order = ctx->invert_tile_order;
- vp9_initialize_dec();
-
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
if (!ctx->postproc_cfg_set &&
}
// Initialize the decoder instance on the first frame
- if (!ctx->decoder_init) {
+ if (ctx->pbi == NULL) {
init_decoder(ctx);
if (ctx->pbi == NULL)
return VPX_CODEC_ERROR;
-
- ctx->decoder_init = 1;
}
// Set these even if already initialized. The caller may have changed the
static vpx_codec_err_t ctrl_set_dbg_options(vpx_codec_alg_priv_t *ctx,
int ctrl_id, va_list args) {
-#if CONFIG_POSTPROC_VISUALIZER && CONFIG_POSTPROC
- int data = va_arg(args, int);
-
-#define MAP(id, var) case id: var = data; break;
-
- switch (ctrl_id) {
- MAP(VP8_SET_DBG_COLOR_REF_FRAME, ctx->dbg_color_ref_frame_flag);
- MAP(VP8_SET_DBG_COLOR_MB_MODES, ctx->dbg_color_mb_modes_flag);
- MAP(VP8_SET_DBG_COLOR_B_MODES, ctx->dbg_color_b_modes_flag);
- MAP(VP8_SET_DBG_DISPLAY_MV, ctx->dbg_display_mv_flag);
- }
-
- return VPX_CODEC_OK;
-#else
return VPX_CODEC_INCAPABLE;
-#endif
}
static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
* \ref MUST be non-zero.
*/
typedef const struct vpx_codec_ctrl_fn_map {
- int ctrl_id;
- vpx_codec_control_fn_t fn;
+ int ctrl_id;
+ vpx_codec_control_fn_t fn;
} vpx_codec_ctrl_fn_map_t;
/*!\brief decode data function pointer prototype
vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx,
vpx_codec_iface_t *iface,
vpx_codec_enc_cfg_t *enc_cfg) {
- int max_intra_size_pct;
vpx_codec_err_t res;
SvcInternal *const si = get_svc_internal(svc_ctx);
if (svc_ctx == NULL || codec_ctx == NULL || iface == NULL ||
// modify encoder configuration
enc_cfg->ss_number_layers = si->layers;
enc_cfg->ts_number_layers = 1; // Temporal layers not used in this encoder.
- enc_cfg->kf_mode = VPX_KF_DISABLED;
// Lag in frames not currently supported
enc_cfg->g_lag_in_frames = 0;
}
vpx_codec_control(codec_ctx, VP9E_SET_SVC, 1);
- vpx_codec_control(codec_ctx, VP8E_SET_CPUUSED, 1);
- vpx_codec_control(codec_ctx, VP8E_SET_STATIC_THRESHOLD, 1);
- vpx_codec_control(codec_ctx, VP8E_SET_NOISE_SENSITIVITY, 1);
vpx_codec_control(codec_ctx, VP8E_SET_TOKEN_PARTITIONS, 1);
- max_intra_size_pct =
- (int)(((double)enc_cfg->rc_buf_optimal_sz * 0.5) *
- ((double)enc_cfg->g_timebase.den / enc_cfg->g_timebase.num) / 10.0);
- vpx_codec_control(codec_ctx, VP8E_SET_MAX_INTRA_BITRATE_PCT,
- max_intra_size_pct);
return VPX_CODEC_OK;
}
si->rc_stats_buf_used = 0;
si->layers = svc_ctx->spatial_layers;
- if (si->frame_within_gop >= si->kf_dist ||
- si->encode_frame_count == 0) {
+ if (si->encode_frame_count == 0) {
si->frame_within_gop = 0;
}
si->is_keyframe = (si->frame_within_gop == 0);
API_SRCS-$(CONFIG_VP8_ENCODER) += vp8cx.h
API_DOC_SRCS-$(CONFIG_VP8_ENCODER) += vp8.h
API_DOC_SRCS-$(CONFIG_VP8_ENCODER) += vp8cx.h
-API_SRCS-$(CONFIG_VP9_ENCODER) += src/svc_encodeframe.c
-API_SRCS-$(CONFIG_VP9_ENCODER) += svc_context.h
+ifeq ($(CONFIG_VP9_ENCODER),yes)
+ API_SRCS-$(CONFIG_SPATIAL_SVC) += src/svc_encodeframe.c
+ API_SRCS-$(CONFIG_SPATIAL_SVC) += svc_context.h
+endif
API_SRCS-$(CONFIG_VP8_DECODER) += vp8.h
API_SRCS-$(CONFIG_VP8_DECODER) += vp8dx.h
input->height = input->y4m.pic_h;
input->framerate.numerator = input->y4m.fps_n;
input->framerate.denominator = input->y4m.fps_d;
- input->use_i420 = 0;
+ input->fmt = input->y4m.vpx_fmt;
} else
fatal("Unsupported Y4M stream.");
} else if (input->detect.buf_read == 4 && fourcc_is_ivf(input->detect.buf)) {
}
}
+static const char* file_type_to_string(enum VideoFileType t) {
+ switch (t) {
+ case FILE_TYPE_RAW: return "RAW";
+ case FILE_TYPE_Y4M: return "Y4M";
+ default: return "Other";
+ }
+}
+
+static const char* image_format_to_string(vpx_img_fmt_t f) {
+ switch (f) {
+ case VPX_IMG_FMT_I420: return "I420";
+ case VPX_IMG_FMT_I422: return "I422";
+ case VPX_IMG_FMT_I444: return "I444";
+ case VPX_IMG_FMT_YV12: return "YV12";
+ default: return "Other";
+ }
+}
static void show_stream_config(struct stream_state *stream,
struct VpxEncoderConfig *global,
if (stream->index == 0) {
fprintf(stderr, "Codec: %s\n",
vpx_codec_iface_name(global->codec->interface()));
- fprintf(stderr, "Source file: %s Format: %s\n", input->filename,
- input->use_i420 ? "I420" : "YV12");
+ fprintf(stderr, "Source file: %s File Type: %s Format: %s\n",
+ input->filename,
+ file_type_to_string(input->file_type),
+ image_format_to_string(input->fmt));
}
if (stream->next || stream->index)
fprintf(stderr, "\nStream Index: %d\n", stream->index);
/* Scale if necessary */
if (img && (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
+ if (img->fmt != VPX_IMG_FMT_I420 && img->fmt != VPX_IMG_FMT_YV12) {
+ fprintf(stderr, "%s can only scale 4:2:0 8bpp inputs\n", exec_name);
+ exit(EXIT_FAILURE);
+ }
if (!stream->img)
stream->img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420,
cfg->g_w, cfg->g_h, 16);
/* Setup default input stream settings */
input.framerate.numerator = 30;
input.framerate.denominator = 1;
- input.use_i420 = 1;
input.only_i420 = 1;
/* First parse the global configuration values, because we want to apply
argv = argv_dup(argc - 1, argv_ + 1);
parse_global_config(&global, argv);
+ input.fmt = global.use_i420 ? VPX_IMG_FMT_I420 : VPX_IMG_FMT_YV12;
{
/* Now parse each stream's parameters. Using a local scope here
frames.*/
memset(&raw, 0, sizeof(raw));
else
- vpx_img_alloc(&raw,
- input.use_i420 ? VPX_IMG_FMT_I420
- : VPX_IMG_FMT_YV12,
- input.width, input.height, 32);
+ vpx_img_alloc(&raw, input.fmt, input.width, input.height, 32);
FOREACH_STREAM(stream->rate_hist =
init_rate_histogram(&stream->config.cfg,