soft_enable() {
for var in $*; do
if ! disabled $var; then
- log_echo " enabling $var"
+ enabled $var || log_echo " enabling $var"
enable_feature $var
fi
done
soft_disable() {
for var in $*; do
if ! enabled $var; then
- log_echo " disabling $var"
+ disabled $var || log_echo " disabling $var"
disable_feature $var
fi
done
elif [ $action = "disable" ] && ! disabled $option ; then
echo "${CMDLINE_SELECT}" | grep "^ *$option\$" >/dev/null ||
die_unknown $opt
+ log_echo " disabling $option"
elif [ $action = "enable" ] && ! enabled $option ; then
echo "${CMDLINE_SELECT}" | grep "^ *$option\$" >/dev/null ||
die_unknown $opt
+ log_echo " enabling $option"
fi
${action}_feature $option
;;
EXE_SFX=
}
+# Reliably find the newest available Darwin SDKs. (Older versions of
+# xcrun don't support --show-sdk-path.)
+show_darwin_sdk_path() {
+ xcrun --sdk $1 --show-sdk-path 2>/dev/null ||
+ xcodebuild -sdk $1 -version Path 2>/dev/null
+}
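+# Callers pass an SDK name such as macosx, iphoneos or iphonesimulator and
+# guard the result with [ -d ... ], since nothing is printed when neither
+# xcrun nor xcodebuild can locate the requested SDK.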
+
process_common_toolchain() {
if [ -z "$toolchain" ]; then
gcctarget="${CHOST:-$(gcc -dumpmachine 2> /dev/null)}"
tgt_isa=x86_64
tgt_os=darwin13
;;
+ *darwin14*)
+ tgt_isa=x86_64
+ tgt_os=darwin14
+ ;;
x86_64*mingw32*)
tgt_os=win64
;;
IOS_VERSION_MIN="6.0"
# Handle darwin variants. Newer SDKs allow targeting older
- # platforms, so find the newest SDK available.
+ # platforms, so use the newest one available.
case ${toolchain} in
*-darwin*)
- if [ -z "${DEVELOPER_DIR}" ]; then
- DEVELOPER_DIR=`xcode-select -print-path 2> /dev/null`
- [ $? -ne 0 ] && OSX_SKIP_DIR_CHECK=1
- fi
- if [ -z "${OSX_SKIP_DIR_CHECK}" ]; then
- OSX_SDK_ROOTS="${DEVELOPER_DIR}/SDKs"
- OSX_SDK_VERSIONS="MacOSX10.4u.sdk MacOSX10.5.sdk MacOSX10.6.sdk"
- OSX_SDK_VERSIONS="${OSX_SDK_VERSIONS} MacOSX10.7.sdk"
- for v in ${OSX_SDK_VERSIONS}; do
- if [ -d "${OSX_SDK_ROOTS}/${v}" ]; then
- osx_sdk_dir="${OSX_SDK_ROOTS}/${v}"
- fi
- done
+ osx_sdk_dir="$(show_darwin_sdk_path macosx)"
+ if [ -d "${osx_sdk_dir}" ]; then
+ add_cflags "-isysroot ${osx_sdk_dir}"
+ add_ldflags "-isysroot ${osx_sdk_dir}"
fi
;;
esac
- if [ -d "${osx_sdk_dir}" ]; then
- add_cflags "-isysroot ${osx_sdk_dir}"
- add_ldflags "-isysroot ${osx_sdk_dir}"
- fi
-
case ${toolchain} in
*-darwin8-*)
add_cflags "-mmacosx-version-min=10.4"
add_cflags "-mmacosx-version-min=10.9"
add_ldflags "-mmacosx-version-min=10.9"
;;
+ *-darwin14-*)
+ add_cflags "-mmacosx-version-min=10.10"
+ add_ldflags "-mmacosx-version-min=10.10"
+ ;;
*-iphonesimulator-*)
add_cflags "-miphoneos-version-min=${IOS_VERSION_MIN}"
add_ldflags "-miphoneos-version-min=${IOS_VERSION_MIN}"
- osx_sdk_dir="$(xcrun --sdk iphonesimulator --show-sdk-path)"
- add_cflags "-isysroot ${osx_sdk_dir}"
- add_ldflags "-isysroot ${osx_sdk_dir}"
+ iossim_sdk_dir="$(show_darwin_sdk_path iphonesimulator)"
+ if [ -d "${iossim_sdk_dir}" ]; then
+ add_cflags "-isysroot ${iossim_sdk_dir}"
+ add_ldflags "-isysroot ${iossim_sdk_dir}"
+ fi
;;
esac
;;
darwin*)
- XCRUN_FIND="xcrun --sdk iphoneos -find"
+ XCRUN_FIND="xcrun --sdk iphoneos --find"
CXX="$(${XCRUN_FIND} clang++)"
CC="$(${XCRUN_FIND} clang)"
AR="$(${XCRUN_FIND} ar)"
# options that were put in above
ASFLAGS="-arch ${tgt_isa} -g"
- alt_libc="$(xcrun --sdk iphoneos --show-sdk-path)"
- add_cflags -arch ${tgt_isa} -isysroot ${alt_libc}
+ add_cflags -arch ${tgt_isa}
add_ldflags -arch ${tgt_isa}
+ alt_libc="$(show_darwin_sdk_path iphoneos)"
+ if [ -d "${alt_libc}" ]; then
+ add_cflags -isysroot ${alt_libc}
+ fi
+
if [ "${LD}" = "${CXX}" ]; then
add_ldflags -miphoneos-version-min="${IOS_VERSION_MIN}"
else
fi
tgt_os_no_version=$(echo "${tgt_os}" | tr -d "[0-9]")
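+ # Treat explicit *openbsd targets and builds hosted on OpenBSD alike when
+ # deciding below whether x86inc can be used.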
+ if [ "${tgt_os_no_version}" = "openbsd" ] || [ "`uname`" = "OpenBSD" ]; then
+ openbsd_like=yes
+ fi
# Default use_x86inc to yes when we are 64 bit, non-pic, or on any
# non-OpenBSD target.
if [ "${tgt_isa}" = "x86_64" ] || [ "${pic}" != "yes" ] || \
- [ "${tgt_os_no_version}" != "darwin" ]; then
+ [ "${openbsd_like}" != "yes" ]; then
soft_enable use_x86inc
fi
devnull='> /dev/null 2>&1'
BUILD_ROOT="_iosbuild"
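+# Base configure arguments shared by every target built into the framework;
+# callers may append more via EXTRA_CONFIGURE_ARGS.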
+CONFIGURE_ARGS="--disable-docs
+ --disable-examples
+ --disable-libyuv
+ --disable-unit-tests"
DIST_DIR="_dist"
FRAMEWORK_DIR="VPX.framework"
HEADER_DIR="${FRAMEWORK_DIR}/Headers/vpx"
mkdir "${target}"
cd "${target}"
eval "${LIBVPX_SOURCE_DIR}/configure" --target="${target}" \
- --disable-docs ${EXTRA_CONFIGURE_ARGS} ${devnull}
+ ${CONFIGURE_ARGS} ${EXTRA_CONFIGURE_ARGS} ${devnull}
export DIST_DIR
eval make -j ${MAKE_JOBS} dist ${devnull}
cd "${old_pwd}"
cat << EOF
BUILD_ROOT=${BUILD_ROOT}
DIST_DIR=${DIST_DIR}
+ CONFIGURE_ARGS=${CONFIGURE_ARGS}
EXTRA_CONFIGURE_ARGS=${EXTRA_CONFIGURE_ARGS}
FRAMEWORK_DIR=${FRAMEWORK_DIR}
HEADER_DIR=${HEADER_DIR}
${toggle_codec_srcs} in/exclude codec library source code
${toggle_debug_libs} in/exclude debug version of libraries
${toggle_static_msvcrt} use static MSVCRT (VS builds only)
+ ${toggle_vp9_highbitdepth} use VP9 high bit depth (10/12) profiles
${toggle_vp8} VP8 codec support
${toggle_vp9} VP9 codec support
${toggle_internal_stats} output of encoder internal stats for debug, if supported (encoders)
${toggle_postproc_visualizer} macro block / block level visualizers
${toggle_multi_res_encoding} enable multiple-resolution encoding
${toggle_temporal_denoising} enable temporal denoising and disable the spatial denoiser
+ ${toggle_vp9_highbitdepth} enable 10/12 bit support in VP9
${toggle_vp9_temporal_denoising}
enable vp9 temporal denoising
${toggle_webm_io} enable input from and output to WebM container
all_platforms="${all_platforms} x86-darwin11-gcc"
all_platforms="${all_platforms} x86-darwin12-gcc"
all_platforms="${all_platforms} x86-darwin13-gcc"
+all_platforms="${all_platforms} x86-darwin14-gcc"
all_platforms="${all_platforms} x86-iphonesimulator-gcc"
all_platforms="${all_platforms} x86-linux-gcc"
all_platforms="${all_platforms} x86-linux-icc"
all_platforms="${all_platforms} x86_64-darwin11-gcc"
all_platforms="${all_platforms} x86_64-darwin12-gcc"
all_platforms="${all_platforms} x86_64-darwin13-gcc"
+all_platforms="${all_platforms} x86_64-darwin14-gcc"
all_platforms="${all_platforms} x86_64-iphonesimulator-gcc"
all_platforms="${all_platforms} x86_64-linux-gcc"
all_platforms="${all_platforms} x86_64-linux-icc"
all_platforms="${all_platforms} universal-darwin11-gcc"
all_platforms="${all_platforms} universal-darwin12-gcc"
all_platforms="${all_platforms} universal-darwin13-gcc"
+all_platforms="${all_platforms} universal-darwin14-gcc"
all_platforms="${all_platforms} generic-gnu"
# all_targets is a list of all targets that can be configured
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
-PROJECT_NAME = "WebM VP8 Codec SDK"
+PROJECT_NAME = "WebM Codec SDK"
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
SHOW_USED_FILES = YES
-# If the sources in your project are distributed over multiple directories
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically from the
# version control system). Doxygen will invoke the program by executing (via
HTML_STYLESHEET =
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
-/*!\mainpage WebM VP8 Codec SDK
+/*!\mainpage WebM Codec SDK
\section main_contents Page Contents
- \ref main_intro
- \ref main_support
\section main_intro Introduction
- Welcome to the WebM VP8 Codec SDK. This SDK allows you to integrate your
- applications with the VP8 video codec, a high quality, royalty free, open
- source codec deployed on millions of computers and devices worldwide.
+ Welcome to the WebM Codec SDK. This SDK allows you to integrate your
+ applications with the VP8 and VP9 video codecs, high quality, royalty free,
+ open source codecs deployed on billions of computers and devices worldwide.
- This distribution of the WebM VP8 Codec SDK includes the following support:
+ This distribution of the WebM Codec SDK includes the following support:
\if vp8_encoder
- \ref vp8_encoder
- Read the \ref samples "sample code" for examples of how to interact with the
codec.
- \ref codec reference
- \if encoder
- - \ref encoder reference
- \endif
- \if decoder
- - \ref decoder reference
- \endif
+ \if encoder
+ - \ref encoder reference
+ \endif
+ \if decoder
+ - \ref decoder reference
+ \endif
\section main_support Support Options & FAQ
The WebM project is an open source project supported by its community. For
first_drop_ = 0;
bits_total_ = 0;
duration_ = 0.0;
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
if (video->frame() == 1) {
encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
}
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
+ encoder->Control(VP8E_SET_NOISE_SENSITIVITY, denoiser_on_);
+ }
+
const vpx_rational_t tb = video->timebase();
timebase_ = static_cast<double>(tb.num) / tb.den;
duration_ = 0;
double effective_datarate_;
size_t bits_in_last_frame_;
int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
#if CONFIG_TEMPORAL_DENOISING
<< " The datarate for the file missed the target!";
}
}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestLarge, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
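+ // A period of 100 toggles the denoiser state every 100 frames of the clip,
+ // exercising both the off-to-on and on-to-off transitions.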
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(cfg_.rc_target_bitrate, effective_datarate_ * 0.95)
+ << " The datarate for the file exceeds the target!";
+ ASSERT_LE(cfg_.rc_target_bitrate, file_datarate_ * 1.3)
+ << " The datarate for the file missed the target!";
+}
#endif // CONFIG_TEMPORAL_DENOISING
TEST_P(DatarateTestLarge, BasicBufferModel) {
for (int i = 0; i < 3; ++i) {
bits_total_[i] = 0;
}
+ denoiser_offon_test_ = 0;
+ denoiser_offon_period_ = -1;
}
//
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 1) {
+ if (video->frame() == 1)
encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
- encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
+ if (denoiser_offon_test_) {
+ ASSERT_GT(denoiser_offon_period_, 0)
+ << "denoiser_offon_period_ is not positive.";
+ if ((video->frame() + 1) % denoiser_offon_period_ == 0) {
+ // Flip denoiser_on_ periodically
+ denoiser_on_ ^= 1;
+ }
}
+
+ encoder->Control(VP9E_SET_NOISE_SENSITIVITY, denoiser_on_);
+
if (cfg_.ts_number_layers > 1) {
if (video->frame() == 1) {
encoder->Control(VP9E_SET_SVC, 1);
vpx_codec_pts_t first_drop_;
int num_drops_;
int denoiser_on_;
+ int denoiser_offon_test_;
+ int denoiser_offon_period_;
};
// Check basic rate targeting,
ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
<< " The datarate for the file is greater than target by too much!";
}
+
+// Check basic datarate targeting, for a single bitrate, when denoiser is off
+// and on.
+TEST_P(DatarateTestVP9Large, DenoiserOffOn) {
+ cfg_.rc_buf_initial_sz = 500;
+ cfg_.rc_buf_optimal_sz = 500;
+ cfg_.rc_buf_sz = 1000;
+ cfg_.rc_dropframe_thresh = 1;
+ cfg_.rc_min_quantizer = 2;
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_lag_in_frames = 0;
+
+ ::libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ 30, 1, 0, 299);
+
+ // For the temporal denoiser (#if CONFIG_VP9_TEMPORAL_DENOISING), there is
+ // currently only one denoiser mode: denoiserYonly (which is 1), though more
+ // modes may be added in the future.
+ cfg_.rc_target_bitrate = 300;
+ ResetModel();
+ // The denoiser is off by default.
+ denoiser_on_ = 0;
+ // Set the offon test flag.
+ denoiser_offon_test_ = 1;
+ denoiser_offon_period_ = 100;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ ASSERT_GE(effective_datarate_[0], cfg_.rc_target_bitrate * 0.85)
+ << " The datarate for the file is lower than target by too much!";
+ ASSERT_LE(effective_datarate_[0], cfg_.rc_target_bitrate * 1.15)
+ << " The datarate for the file is greater than target by too much!";
+}
#endif // CONFIG_VP9_TEMPORAL_DENOISING
VP8_INSTANTIATE_TEST_CASE(DatarateTestLarge, ALL_TEST_MODES);
const int kMaxPsnr = 100;
const double kUsecsInSec = 1000000.0;
-static const char *kNewEncodeOutputFile = "new_encode.ivf";
+const char kNewEncodeOutputFile[] = "new_encode.ivf";
/*
DecodePerfTest takes a tuple of filename + number of threads to decode with
INSTANTIATE_TEST_CASE_P(VP9, DecodePerfTest,
::testing::ValuesIn(kVP9DecodePerfVectors));
-class VP9NewEncodeDecodePerfTest : public ::libvpx_test::EncoderTest,
+class VP9NewEncodeDecodePerfTest :
+ public ::libvpx_test::EncoderTest,
public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
VP9NewEncodeDecodePerfTest()
const std::string data_path = getenv("LIBVPX_TEST_DATA_PATH");
const std::string path_to_source = data_path + "/" + kNewEncodeOutputFile;
outfile_ = fopen(path_to_source.c_str(), "wb");
+ ASSERT_TRUE(outfile_ != NULL);
}
virtual void EndPassHook() {
- if (outfile_) {
+ if (outfile_ != NULL) {
if (!fseek(outfile_, 0, SEEK_SET))
ivf_write_file_header(outfile_, &cfg_, VP9_FOURCC, out_frames_);
fclose(outfile_);
// Write frame header and data.
ivf_write_frame_header(outfile_, out_frames_, pkt->data.frame.sz);
- (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
+ ASSERT_EQ(fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_),
+ pkt->data.frame.sz);
}
- virtual bool DoDecode() { return 0; }
+ virtual bool DoDecode() { return false; }
void set_speed(unsigned int speed) {
speed_ = speed;
uint32_t out_frames_;
};
-
struct EncodePerfTestVideo {
EncodePerfTestVideo(const char *name_, uint32_t width_, uint32_t height_,
uint32_t bitrate_, int frames_)
vpx_usec_timer_mark(&t);
const double elapsed_secs =
- double(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
+ static_cast<double>(vpx_usec_timer_elapsed(&t)) / kUsecsInSec;
const unsigned decode_frames = decode_video.frame_number();
- const double fps = double(decode_frames) / elapsed_secs;
+ const double fps = static_cast<double>(decode_frames) / elapsed_secs;
printf("{\n");
printf("\t\"type\" : \"decode_perf_test\",\n");
void DecoderTest::RunLoop(CompressedVideoSource *video,
const vpx_codec_dec_cfg_t &dec_cfg) {
- Decoder* const decoder = codec_->CreateDecoder(dec_cfg, 0);
+ Decoder* const decoder = codec_->CreateDecoder(dec_cfg, flags_, 0);
ASSERT_TRUE(decoder != NULL);
bool end_of_file = false;
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
+#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vpx_version.h"
EncodePerfTestVideo("niklas_1280_720_30.yuv", 1280, 720, 600, 470),
};
-const int kEncodePerfTestSpeeds[] = { 5, 6, 7, 12 };
+const int kEncodePerfTestSpeeds[] = { 5, 6, 7, 8 };
+const int kEncodePerfTestThreads[] = { 1, 2, 4 };
#define NELEMENTS(x) (sizeof((x)) / sizeof((x)[0]))
min_psnr_(kMaxPsnr),
nframes_(0),
encoding_mode_(GET_PARAM(1)),
- speed_(0) {}
+ speed_(0),
+ threads_(1) {}
virtual ~VP9EncodePerfTest() {}
cfg_.rc_buf_optimal_sz = 600;
cfg_.rc_resize_allowed = 0;
cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_error_resilient = 1;
+ cfg_.g_threads = threads_;
}
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
- if (video->frame() == 1) {
+ if (video->frame() == 0) {
+ const int log2_tile_columns = 3;
encoder->Control(VP8E_SET_CPUUSED, speed_);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, log2_tile_columns);
+ encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING, 1);
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 0);
}
}
speed_ = speed;
}
+ void set_threads(unsigned int threads) {
+ threads_ = threads;
+ }
+
private:
double min_psnr_;
unsigned int nframes_;
libvpx_test::TestMode encoding_mode_;
unsigned speed_;
+ unsigned int threads_;
};
TEST_P(VP9EncodePerfTest, PerfTest) {
for (size_t i = 0; i < NELEMENTS(kVP9EncodePerfTestVectors); ++i) {
for (size_t j = 0; j < NELEMENTS(kEncodePerfTestSpeeds); ++j) {
- SetUp();
-
- const vpx_rational timebase = { 33333333, 1000000000 };
- cfg_.g_timebase = timebase;
- cfg_.rc_target_bitrate = kVP9EncodePerfTestVectors[i].bitrate;
-
- init_flags_ = VPX_CODEC_USE_PSNR;
-
- const unsigned frames = kVP9EncodePerfTestVectors[i].frames;
- const char *video_name = kVP9EncodePerfTestVectors[i].name;
- libvpx_test::I420VideoSource video(
- video_name,
- kVP9EncodePerfTestVectors[i].width,
- kVP9EncodePerfTestVectors[i].height,
- timebase.den, timebase.num, 0,
- kVP9EncodePerfTestVectors[i].frames);
- set_speed(kEncodePerfTestSpeeds[j]);
-
- vpx_usec_timer t;
- vpx_usec_timer_start(&t);
-
- ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
-
- vpx_usec_timer_mark(&t);
- const double elapsed_secs = vpx_usec_timer_elapsed(&t) / kUsecsInSec;
- const double fps = frames / elapsed_secs;
- const double minimum_psnr = min_psnr();
-
- printf("{\n");
- printf("\t\"type\" : \"encode_perf_test\",\n");
- printf("\t\"version\" : \"%s\",\n", VERSION_STRING_NOSP);
- printf("\t\"videoName\" : \"%s\",\n", video_name);
- printf("\t\"encodeTimeSecs\" : %f,\n", elapsed_secs);
- printf("\t\"totalFrames\" : %u,\n", frames);
- printf("\t\"framesPerSecond\" : %f,\n", fps);
- printf("\t\"minPsnr\" : %f,\n", minimum_psnr);
- printf("\t\"speed\" : %d\n", kEncodePerfTestSpeeds[j]);
- printf("}\n");
+ for (size_t k = 0; k < NELEMENTS(kEncodePerfTestThreads); ++k) {
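+ // Only test thread counts that make sense for the frame size; small
+ // resolutions skip the higher thread counts.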
+ if (kVP9EncodePerfTestVectors[i].width < 512 &&
+ kEncodePerfTestThreads[k] > 1)
+ continue;
+ else if (kVP9EncodePerfTestVectors[i].width < 1024 &&
+ kEncodePerfTestThreads[k] > 2)
+ continue;
+
+ set_threads(kEncodePerfTestThreads[k]);
+ SetUp();
+
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = kVP9EncodePerfTestVectors[i].bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ const unsigned frames = kVP9EncodePerfTestVectors[i].frames;
+ const char *video_name = kVP9EncodePerfTestVectors[i].name;
+ libvpx_test::I420VideoSource video(
+ video_name,
+ kVP9EncodePerfTestVectors[i].width,
+ kVP9EncodePerfTestVectors[i].height,
+ timebase.den, timebase.num, 0,
+ kVP9EncodePerfTestVectors[i].frames);
+ set_speed(kEncodePerfTestSpeeds[j]);
+
+ vpx_usec_timer t;
+ vpx_usec_timer_start(&t);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+
+ vpx_usec_timer_mark(&t);
+ const double elapsed_secs = vpx_usec_timer_elapsed(&t) / kUsecsInSec;
+ const double fps = frames / elapsed_secs;
+ const double minimum_psnr = min_psnr();
+ std::string display_name(video_name);
+ if (kEncodePerfTestThreads[k] > 1) {
+ char thread_count[32];
+ snprintf(thread_count, sizeof(thread_count), "_t-%d",
+ kEncodePerfTestThreads[k]);
+ display_name += thread_count;
+ }
+
+ printf("{\n");
+ printf("\t\"type\" : \"encode_perf_test\",\n");
+ printf("\t\"version\" : \"%s\",\n", VERSION_STRING_NOSP);
+ printf("\t\"videoName\" : \"%s\",\n", display_name.c_str());
+ printf("\t\"encodeTimeSecs\" : %f,\n", elapsed_secs);
+ printf("\t\"totalFrames\" : %u,\n", frames);
+ printf("\t\"framesPerSecond\" : %f,\n", fps);
+ printf("\t\"minPsnr\" : %f,\n", minimum_psnr);
+ printf("\t\"speed\" : %d,\n", kEncodePerfTestSpeeds[j]);
+ printf("\t\"threads\" : %d\n", kEncodePerfTestThreads[k]);
+ printf("}\n");
+ }
}
}
}
// Encode the frame
API_REGISTER_STATE_CHECK(
- res = vpx_codec_encode(&encoder_,
- video.img(), video.pts(), video.duration(),
+ res = vpx_codec_encode(&encoder_, img, video.pts(), video.duration(),
frame_flags, deadline_));
ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
}
static bool compare_img(const vpx_image_t *img1,
const vpx_image_t *img2) {
bool match = (img1->fmt == img2->fmt) &&
+ (img1->cs == img2->cs) &&
(img1->d_w == img2->d_w) &&
(img1->d_h == img2->d_h);
}
#endif
+ void Config(const vpx_codec_enc_cfg_t *cfg) {
+ const vpx_codec_err_t res = vpx_codec_enc_config_set(&encoder_, cfg);
+ ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
+ cfg_ = *cfg;
+ }
+
void set_deadline(unsigned long deadline) {
deadline_ = deadline;
}
void Reset() {
error_nframes_ = 0;
droppable_nframes_ = 0;
+ pattern_switch_ = 0;
}
virtual void SetUp() {
// 1 3
// 0 2 .....
// LAST is updated on base/layer 0, GOLDEN updated on layer 1.
- int SetFrameFlags(int frame_num, int num_temp_layers) {
+ // Non-zero pattern_switch parameter means pattern will switch to
+ // not using LAST for frame_num >= pattern_switch.
+ int SetFrameFlags(int frame_num,
+ int num_temp_layers,
+ int pattern_switch) {
int frame_flags = 0;
if (num_temp_layers == 2) {
- if (frame_num % 2 == 0) {
- // Layer 0: predict from L and ARF, update L.
- frame_flags = VP8_EFLAG_NO_REF_GF |
- VP8_EFLAG_NO_UPD_GF |
- VP8_EFLAG_NO_UPD_ARF;
- } else {
- // Layer 1: predict from L, GF, and ARF, and update GF.
- frame_flags = VP8_EFLAG_NO_UPD_ARF |
- VP8_EFLAG_NO_UPD_LAST;
- }
+ if (frame_num % 2 == 0) {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 0: predict from LAST and ARF, update LAST.
+ frame_flags = VP8_EFLAG_NO_REF_GF |
+ VP8_EFLAG_NO_UPD_GF |
+ VP8_EFLAG_NO_UPD_ARF;
+ } else {
+ // Layer 0: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ } else {
+ if (frame_num < pattern_switch || pattern_switch == 0) {
+ // Layer 1: predict from L, GF, and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_UPD_ARF |
+ VP8_EFLAG_NO_UPD_LAST;
+ } else {
+ // Layer 1: predict from GF and ARF, update GF.
+ frame_flags = VP8_EFLAG_NO_REF_LAST |
+ VP8_EFLAG_NO_UPD_LAST |
+ VP8_EFLAG_NO_UPD_ARF;
+ }
+ }
}
return frame_flags;
}
VP8_EFLAG_NO_UPD_ARF);
// For temporal layer case.
if (cfg_.ts_number_layers > 1) {
- frame_flags_ = SetFrameFlags(video->frame(), cfg_.ts_number_layers);
+ frame_flags_ = SetFrameFlags(video->frame(),
+ cfg_.ts_number_layers,
+ pattern_switch_);
for (unsigned int i = 0; i < droppable_nframes_; ++i) {
if (droppable_frames_[i] == video->frame()) {
std::cout << "Encoding droppable frame: "
return mismatch_nframes_;
}
+ void SetPatternSwitch(int frame_switch) {
+ pattern_switch_ = frame_switch;
+ }
+
private:
double psnr_;
unsigned int nframes_;
unsigned int error_nframes_;
unsigned int droppable_nframes_;
+ unsigned int pattern_switch_;
double mismatch_psnr_;
unsigned int mismatch_nframes_;
unsigned int error_frames_[kMaxErrorFrames];
// Error resilient mode ON.
cfg_.g_error_resilient = 1;
cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(0);
// The odd frames are the enhancement layer for 2 layer pattern, so set
// those frames as droppable. Drop the last 7 frames.
Reset();
}
+// Check for successful decoding and no encoder/decoder mismatch
+// for a two layer temporal pattern, where at some point in the
+// sequence, the LAST ref is not used anymore.
+TEST_P(ErrorResilienceTestLarge, 2LayersNoRefLast) {
+ const vpx_rational timebase = { 33333333, 1000000000 };
+ cfg_.g_timebase = timebase;
+ cfg_.rc_target_bitrate = 500;
+ cfg_.g_lag_in_frames = 0;
+
+ cfg_.rc_end_usage = VPX_CBR;
+ // 2 Temporal layers, no spatial layers, CBR mode.
+ cfg_.ss_number_layers = 1;
+ cfg_.ts_number_layers = 2;
+ cfg_.ts_rate_decimator[0] = 2;
+ cfg_.ts_rate_decimator[1] = 1;
+ cfg_.ts_periodicity = 2;
+ cfg_.ts_target_bitrate[0] = 60 * cfg_.rc_target_bitrate / 100;
+ cfg_.ts_target_bitrate[1] = cfg_.rc_target_bitrate;
+
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::I420VideoSource video("hantro_collage_w352h288.yuv", 352, 288,
+ timebase.den, timebase.num, 0, 100);
+
+ // Error resilient mode ON.
+ cfg_.g_error_resilient = 1;
+ cfg_.kf_mode = VPX_KF_DISABLED;
+ SetPatternSwitch(60);
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ // Test that no mismatches have been found
+ std::cout << " Mismatch frames: "
+ << GetMismatchFrames() << "\n";
+ EXPECT_EQ(GetMismatchFrames(), (unsigned int) 0);
+
+ // Reset previously set of error/droppable frames.
+ Reset();
+}
+
class ErrorResilienceTestLargeCodecControls : public ::libvpx_test::EncoderTest,
public ::libvpx_test::CodecTestWithParam<libvpx_test::TestMode> {
protected:
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif // HAVE_SSE2
+#if HAVE_NEON_ASM
+#if CONFIG_VP9_HIGHBITDEPTH
+// No neon high bitdepth functions.
+#else
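+// The vertical_16 variants do not take a count argument, so these wrappers
+// adapt them to the common loop-filter prototype used by the tests (count is
+// ignored).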
+void wrapper_vertical_16_neon(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
+}
+
+void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ vp9_lpf_vertical_16_c(s, p, blimit, limit, thresh);
+}
+
+void wrapper_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ vp9_lpf_vertical_16_dual_neon(s, p, blimit, limit, thresh);
+}
+
+void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh,
+ int count) {
+ vp9_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_NEON_ASM
+
class Loop8Test6Param : public ::testing::TestWithParam<loop8_param_t> {
public:
virtual ~Loop8Test6Param() {}
#endif // CONFIG_VP9_HIGHBITDEPTH
#endif
-#if HAVE_NEON && (!CONFIG_VP9_HIGHBITDEPTH)
+#if HAVE_NEON
+#if CONFIG_VP9_HIGHBITDEPTH
+// No neon high bitdepth functions.
+#else
INSTANTIATE_TEST_CASE_P(
NEON, Loop8Test6Param,
::testing::Values(
#if HAVE_NEON_ASM
+// Using #if inside the macro is unsupported on MSVS but the tests are not
+// currently built for MSVS with ARM and NEON.
make_tuple(&vp9_lpf_horizontal_16_neon,
&vp9_lpf_horizontal_16_c, 8),
+ make_tuple(&wrapper_vertical_16_neon,
+ &wrapper_vertical_16_c, 8),
+ make_tuple(&wrapper_vertical_16_dual_neon,
+ &wrapper_vertical_16_dual_c, 8),
+ make_tuple(&vp9_lpf_horizontal_8_neon,
+ &vp9_lpf_horizontal_8_c, 8),
+ make_tuple(&vp9_lpf_vertical_8_neon,
+ &vp9_lpf_vertical_8_c, 8),
#endif // HAVE_NEON_ASM
make_tuple(&vp9_lpf_horizontal_4_neon,
&vp9_lpf_horizontal_4_c, 8),
- make_tuple(&vp9_lpf_horizontal_8_neon,
- &vp9_lpf_horizontal_8_c, 8),
make_tuple(&vp9_lpf_vertical_4_neon,
- &vp9_lpf_vertical_4_c, 8),
- make_tuple(&vp9_lpf_vertical_8_neon,
- &vp9_lpf_vertical_8_c, 8)));
+ &vp9_lpf_vertical_4_c, 8)));
INSTANTIATE_TEST_CASE_P(
NEON, Loop8Test9Param,
::testing::Values(
- make_tuple(&vp9_lpf_horizontal_4_dual_neon,
- &vp9_lpf_horizontal_4_dual_c, 8),
+#if HAVE_NEON_ASM
make_tuple(&vp9_lpf_horizontal_8_dual_neon,
&vp9_lpf_horizontal_8_dual_c, 8),
- make_tuple(&vp9_lpf_vertical_4_dual_neon,
- &vp9_lpf_vertical_4_dual_c, 8),
make_tuple(&vp9_lpf_vertical_8_dual_neon,
- &vp9_lpf_vertical_8_dual_c, 8)));
-#endif // HAVE_NEON && (!CONFIG_VP9_HIGHBITDEPTH)
+ &vp9_lpf_vertical_8_dual_c, 8),
+#endif // HAVE_NEON_ASM
+ make_tuple(&vp9_lpf_horizontal_4_dual_neon,
+ &vp9_lpf_horizontal_4_dual_c, 8),
+ make_tuple(&vp9_lpf_vertical_4_dual_neon,
+ &vp9_lpf_vertical_4_dual_c, 8)));
+#endif // CONFIG_VP9_HIGHBITDEPTH
+#endif // HAVE_NEON
} // namespace
TEST_P(ResizeTest, TestExternalResizeWorks) {
ResizingVideoSource video;
+ cfg_.g_lag_in_frames = 0;
ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
for (std::vector<FrameInfo>::const_iterator info = frame_info_list_.begin();
const unsigned int expected_h = ScaleForFrameNumber(frame, kInitialHeight);
EXPECT_EQ(expected_w, info->w)
- << "Frame " << frame << "had unexpected width";
+ << "Frame " << frame << " had unexpected width";
EXPECT_EQ(expected_h, info->h)
- << "Frame " << frame << "had unexpected height";
+ << "Frame " << frame << " had unexpected height";
}
}
}
}
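+// Frames [0, 10) and [20, end) are I420 while frames [10, 20) are I444,
+// forcing a mid-stream chroma subsampling (and profile) change.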
+vpx_img_fmt_t CspForFrameNumber(int frame) {
+ if (frame < 10)
+ return VPX_IMG_FMT_I420;
+ if (frame < 20)
+ return VPX_IMG_FMT_I444;
+ return VPX_IMG_FMT_I420;
+}
+
+class ResizeCspTest : public ResizeTest {
+ protected:
+#if WRITE_COMPRESSED_STREAM
+ ResizeCspTest()
+ : ResizeTest(),
+ frame0_psnr_(0.0),
+ outfile_(NULL),
+ out_frames_(0) {}
+#else
+ ResizeCspTest() : ResizeTest(), frame0_psnr_(0.0) {}
+#endif
+
+ virtual ~ResizeCspTest() {}
+
+ virtual void BeginPassHook(unsigned int /*pass*/) {
+#if WRITE_COMPRESSED_STREAM
+ outfile_ = fopen("vp91-2-05-cspchape.ivf", "wb");
+#endif
+ }
+
+ virtual void EndPassHook() {
+#if WRITE_COMPRESSED_STREAM
+ if (outfile_) {
+ if (!fseek(outfile_, 0, SEEK_SET))
+ write_ivf_file_header(&cfg_, out_frames_, outfile_);
+ fclose(outfile_);
+ outfile_ = NULL;
+ }
+#endif
+ }
+
+ virtual void PreEncodeFrameHook(libvpx_test::VideoSource *video,
+ libvpx_test::Encoder *encoder) {
+ if (CspForFrameNumber(video->frame()) != VPX_IMG_FMT_I420 &&
+ cfg_.g_profile != 1) {
+ cfg_.g_profile = 1;
+ encoder->Config(&cfg_);
+ }
+ if (CspForFrameNumber(video->frame()) == VPX_IMG_FMT_I420 &&
+ cfg_.g_profile != 0) {
+ cfg_.g_profile = 0;
+ encoder->Config(&cfg_);
+ }
+ }
+
+ virtual void PSNRPktHook(const vpx_codec_cx_pkt_t *pkt) {
+ if (!frame0_psnr_)
+ frame0_psnr_ = pkt->data.psnr.psnr[0];
+ EXPECT_NEAR(pkt->data.psnr.psnr[0], frame0_psnr_, 2.0);
+ }
+
+#if WRITE_COMPRESSED_STREAM
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ ++out_frames_;
+
+ // Write initial file header if first frame.
+ if (pkt->data.frame.pts == 0)
+ write_ivf_file_header(&cfg_, 0, outfile_);
+
+ // Write frame header and data.
+ write_ivf_frame_header(pkt, outfile_);
+ (void)fwrite(pkt->data.frame.buf, 1, pkt->data.frame.sz, outfile_);
+ }
+#endif
+
+ double frame0_psnr_;
+#if WRITE_COMPRESSED_STREAM
+ FILE *outfile_;
+ unsigned int out_frames_;
+#endif
+};
+
+class ResizingCspVideoSource : public ::libvpx_test::DummyVideoSource {
+ public:
+ ResizingCspVideoSource() {
+ SetSize(kInitialWidth, kInitialHeight);
+ limit_ = 30;
+ }
+
+ virtual ~ResizingCspVideoSource() {}
+
+ protected:
+ virtual void Next() {
+ ++frame_;
+ SetImageFormat(CspForFrameNumber(frame_));
+ FillFrame();
+ }
+};
+
+TEST_P(ResizeCspTest, TestResizeCspWorks) {
+ ResizingCspVideoSource video;
+ cfg_.rc_min_quantizer = cfg_.rc_max_quantizer = 48;
+ cfg_.g_lag_in_frames = 0;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+}
+
VP8_INSTANTIATE_TEST_CASE(ResizeTest, ONE_PASS_TEST_MODES);
+VP9_INSTANTIATE_TEST_CASE(ResizeTest,
+ ::testing::Values(::libvpx_test::kRealTime));
VP9_INSTANTIATE_TEST_CASE(ResizeInternalTest,
::testing::Values(::libvpx_test::kOnePassBest));
+VP9_INSTANTIATE_TEST_CASE(ResizeCspTest,
+ ::testing::Values(::libvpx_test::kRealTime));
} // namespace
#endif // CONFIG_USE_X86INC
#endif // HAVE_SSSE3
-#if HAVE_AVX2
#if CONFIG_VP9_ENCODER
+#if HAVE_AVX2
const SadMxNx4Func sad_64x64x4d_avx2 = vp9_sad64x64x4d_avx2;
const SadMxNx4Func sad_32x32x4d_avx2 = vp9_sad32x32x4d_avx2;
INSTANTIATE_TEST_CASE_P(AVX2, SADx4Test, ::testing::Values(
make_tuple(32, 32, sad_32x32x4d_avx2, -1),
make_tuple(64, 64, sad_64x64x4d_avx2, -1)));
-#endif // CONFIG_VP9_ENCODER
#endif // HAVE_AVX2
+#if HAVE_NEON
+const SadMxNx4Func sad_16x16x4d_neon = vp9_sad16x16x4d_neon;
+const SadMxNx4Func sad_32x32x4d_neon = vp9_sad32x32x4d_neon;
+const SadMxNx4Func sad_64x64x4d_neon = vp9_sad64x64x4d_neon;
+INSTANTIATE_TEST_CASE_P(NEON, SADx4Test, ::testing::Values(
+ make_tuple(16, 16, sad_16x16x4d_neon, -1),
+ make_tuple(32, 32, sad_32x32x4d_neon, -1),
+ make_tuple(64, 64, sad_64x64x4d_neon, -1)));
+#endif // HAVE_NEON
+#endif // CONFIG_VP9_ENCODER
+
} // namespace
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-06-bilinear.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x4.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv440.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm.md5
ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm.md5
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-3.webm
ifeq ($(CONFIG_DECODE_PERF_TESTS),yes)
# NewEncode Test
+ifneq ($(CONFIG_VP9_ENCODER),yes)
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += niklas_1280_720_30.yuv
+endif
# BBB VP9 streams
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_426x240_tile_1x1_180kbps.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_640x360_tile_1x2_337kbps.webm
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += kirland_640_480_30.yuv
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcomoving_640_480_30.yuv
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcostationary_640_480_30.yuv
-ifneq ($(CONFIG_DECODE_PERF_TESTS),yes)
-LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_1280_720_30.yuv
-endif
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_640_480_30.yuv
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomanarrows_640_480_30.yuv
LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomasmallcameramovement_640_480_30.yuv
a61774cf03fc584bd9f0904fc145253bb8ea6c4c invalid-vp91-2-mixedrefcsp-444to420.ivf.res
812d05a64a0d83c1b504d0519927ddc5a2cdb273 invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
1e472baaf5f6113459f0399a38a5a5e68d17799d invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
+f97088c7359fc8d3d5aa5eafe57bc7308b3ee124 vp90-2-20-big_superframe-01.webm
+47d7d409785afa33b123376de0c907336e6c7bd7 vp90-2-20-big_superframe-01.webm.md5
+65ade6d2786209582c50d34cfe22b3cdb033abaf vp90-2-20-big_superframe-02.webm
+7c0ed8d04c4d06c5411dd2e5de2411d37f092db5 vp90-2-20-big_superframe-02.webm.md5
+667ec8718c982aef6be07eb94f083c2efb9d2d16 vp90-2-07-frame_parallel-1.webm
+bfc82bf848e9c05020d61e3ffc1e62f25df81d19 vp90-2-07-frame_parallel-1.webm.md5
+efd5a51d175cfdacd169ed23477729dc558030dc invalid-vp90-2-07-frame_parallel-1.webm
+9f912712ec418be69adb910e2ca886a63c4cec08 invalid-vp90-2-07-frame_parallel-2.webm
+445f5a53ca9555341852997ccdd480a51540bd14 invalid-vp90-2-07-frame_parallel-3.webm
\ No newline at end of file
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += external_frame_buffer_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += invalid_file_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += user_priv_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_frame_parallel_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += active_map_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += borders_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += cpu_speed_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += frame_size_tests.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_lossless_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_end_to_end_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_ethread_test.cc
LIBVPX_TEST_SRCS-yes += decode_test_driver.cc
LIBVPX_TEST_SRCS-yes += decode_test_driver.h
#include <cstdlib>
#include <string>
#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "../tools_common.h"
#include "./vpx_config.h"
#include "test/codec_factory.h"
#include "test/decode_test_driver.h"
namespace {
+enum DecodeMode {
+ kSerialMode,
+ kFrameParallelMode
+};
+
+const int kDecodeMode = 0;
+const int kThreads = 1;
+const int kFileName = 2;
+
+typedef std::tr1::tuple<int, int, const char*> DecodeParam;
+
class TestVectorTest : public ::libvpx_test::DecoderTest,
- public ::libvpx_test::CodecTestWithParam<const char*> {
+ public ::libvpx_test::CodecTestWithParam<DecodeParam> {
protected:
- TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) {}
+ TestVectorTest()
+ : DecoderTest(GET_PARAM(0)),
+ md5_file_(NULL) {
+ }
virtual ~TestVectorTest() {
if (md5_file_)
// checksums match the correct md5 data, then the test is passed. Otherwise,
// the test failed.
TEST_P(TestVectorTest, MD5Match) {
- const std::string filename = GET_PARAM(1);
+ const DecodeParam input = GET_PARAM(1);
+ const std::string filename = std::tr1::get<kFileName>(input);
+ const int threads = std::tr1::get<kThreads>(input);
+ const int mode = std::tr1::get<kDecodeMode>(input);
libvpx_test::CompressedVideoSource *video = NULL;
+ vpx_codec_flags_t flags = 0;
+ vpx_codec_dec_cfg_t cfg = {0};
+ char str[256];
+
+ if (mode == kFrameParallelMode) {
+ flags |= VPX_CODEC_USE_FRAME_THREADING;
+ }
+
+ cfg.threads = threads;
+
+ snprintf(str, sizeof(str) / sizeof(str[0]) - 1,
+ "file: %s mode: %s threads: %d",
+ filename.c_str(), mode == 0 ? "Serial" : "Parallel", threads);
+ SCOPED_TRACE(str);
// Open compressed video file.
if (filename.substr(filename.length() - 3, 3) == "ivf") {
const std::string md5_filename = filename + ".md5";
OpenMD5File(md5_filename);
+ // Set decode config and flags.
+ set_cfg(cfg);
+ set_flags(flags);
+
// Decode frame, and check the md5 matching.
- ASSERT_NO_FATAL_FAILURE(RunLoop(video));
+ ASSERT_NO_FATAL_FAILURE(RunLoop(video, cfg));
delete video;
}
-VP8_INSTANTIATE_TEST_CASE(TestVectorTest,
- ::testing::ValuesIn(libvpx_test::kVP8TestVectors,
- libvpx_test::kVP8TestVectors +
- libvpx_test::kNumVP8TestVectors));
-VP9_INSTANTIATE_TEST_CASE(TestVectorTest,
- ::testing::ValuesIn(libvpx_test::kVP9TestVectors,
- libvpx_test::kVP9TestVectors +
- libvpx_test::kNumVP9TestVectors));
-
+// Test VP8 decode in serial mode with single thread.
+// NOTE: VP8 only support serial mode.
+INSTANTIATE_TEST_CASE_P(
+ VP8, TestVectorTest,
+ ::testing::Combine(
+ ::testing::Values(
+ static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP8)),
+ ::testing::Combine(
+ ::testing::Values(0), // Serial Mode.
+ ::testing::Values(1), // Single thread.
+ ::testing::ValuesIn(libvpx_test::kVP8TestVectors,
+ libvpx_test::kVP8TestVectors +
+ libvpx_test::kNumVP8TestVectors))));
+
+// Test VP9 decode in serial mode with single thread.
+INSTANTIATE_TEST_CASE_P(
+ VP9, TestVectorTest,
+ ::testing::Combine(
+ ::testing::Values(
+ static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP9)),
+ ::testing::Combine(
+ ::testing::Values(0), // Serial Mode.
+ ::testing::Values(1), // Single thread.
+ ::testing::ValuesIn(libvpx_test::kVP9TestVectors,
+ libvpx_test::kVP9TestVectors +
+ libvpx_test::kNumVP9TestVectors))));
+
+
+// Test VP9 decode in frame parallel mode with different number of threads.
+INSTANTIATE_TEST_CASE_P(
+ VP9MultiThreadedFrameParallel, TestVectorTest,
+ ::testing::Combine(
+ ::testing::Values(
+ static_cast<const libvpx_test::CodecFactory *>(&libvpx_test::kVP9)),
+ ::testing::Combine(
+ ::testing::Values(1), // Frame Parallel mode.
+ ::testing::Range(2, 9), // With 2 ~ 8 threads.
+ ::testing::ValuesIn(libvpx_test::kVP9TestVectors,
+ libvpx_test::kVP9TestVectors +
+ libvpx_test::kNumVP9TestVectors))));
} // namespace
"vp93-2-20-10bit-yuv440.webm", "vp93-2-20-12bit-yuv440.webm",
"vp93-2-20-10bit-yuv444.webm", "vp93-2-20-12bit-yuv444.webm",
#endif // CONFIG_VP9_HIGHBITDEPTH
+ "vp90-2-20-big_superframe-01.webm", "vp90-2-20-big_superframe-02.webm",
};
const int kNumVP9TestVectors = NELEMENTS(kVP9TestVectors);
#endif // CONFIG_VP9_DECODER
return
fi
+ # Don't bother with the environment tests if everything else was disabled.
+ [ -z "${tests_to_filter}" ] && return
+
# Combine environment and actual tests.
local tests_to_run="${env_tests} ${tests_to_filter}"
const vp9_variance_fn_t variance8x8_neon = vp9_variance8x8_neon;
const vp9_variance_fn_t variance16x16_neon = vp9_variance16x16_neon;
const vp9_variance_fn_t variance32x32_neon = vp9_variance32x32_neon;
+const vp9_variance_fn_t variance32x64_neon = vp9_variance32x64_neon;
+const vp9_variance_fn_t variance64x32_neon = vp9_variance64x32_neon;
+const vp9_variance_fn_t variance64x64_neon = vp9_variance64x64_neon;
INSTANTIATE_TEST_CASE_P(
NEON, VP9VarianceTest,
::testing::Values(make_tuple(3, 3, variance8x8_neon, 0),
make_tuple(4, 4, variance16x16_neon, 0),
- make_tuple(5, 5, variance32x32_neon, 0)));
+ make_tuple(5, 5, variance32x32_neon, 0),
+ make_tuple(5, 6, variance32x64_neon, 0),
+ make_tuple(6, 5, variance64x32_neon, 0),
+ make_tuple(6, 6, variance64x64_neon, 0)));
const vp9_subpixvariance_fn_t subpel_variance8x8_neon =
vp9_sub_pixel_variance8x8_neon;
vp9_sub_pixel_variance16x16_neon;
const vp9_subpixvariance_fn_t subpel_variance32x32_neon =
vp9_sub_pixel_variance32x32_neon;
+const vp9_subpixvariance_fn_t subpel_variance64x64_neon =
+ vp9_sub_pixel_variance64x64_neon;
INSTANTIATE_TEST_CASE_P(
NEON, VP9SubpelVarianceTest,
::testing::Values(make_tuple(3, 3, subpel_variance8x8_neon, 0),
make_tuple(4, 4, subpel_variance16x16_neon, 0),
- make_tuple(5, 5, subpel_variance32x32_neon, 0)));
+ make_tuple(5, 5, subpel_variance32x32_neon, 0),
+ make_tuple(6, 6, subpel_variance64x64_neon, 0)));
#endif // HAVE_NEON
#endif // CONFIG_VP9_ENCODER
class DummyVideoSource : public VideoSource {
public:
- DummyVideoSource() : img_(NULL), limit_(100), width_(0), height_(0) {
- SetSize(80, 64);
+ DummyVideoSource()
+ : img_(NULL),
+ limit_(100),
+ width_(80),
+ height_(64),
+ format_(VPX_IMG_FMT_I420) {
+ ReallocImage();
}
virtual ~DummyVideoSource() { vpx_img_free(img_); }
void SetSize(unsigned int width, unsigned int height) {
if (width != width_ || height != height_) {
- vpx_img_free(img_);
- raw_sz_ = ((width + 31)&~31) * height * 3 / 2;
- img_ = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, width, height, 32);
width_ = width;
height_ = height;
+ ReallocImage();
+ }
+ }
+
+ void SetImageFormat(vpx_img_fmt_t format) {
+ if (format_ != format) {
+ format_ = format;
+ ReallocImage();
}
}
protected:
virtual void FillFrame() { if (img_) memset(img_->img_data, 0, raw_sz_); }
+ void ReallocImage() {
+ vpx_img_free(img_);
+ img_ = vpx_img_alloc(NULL, format_, width_, height_, 32);
+ raw_sz_ = ((img_->w + 31) & ~31) * img_->h * img_->bps / 8;
+ }
+
vpx_image_t *img_;
size_t raw_sz_;
unsigned int limit_;
unsigned int frame_;
unsigned int width_;
unsigned int height_;
+ vpx_img_fmt_t format_;
};
#endif
+#if HAVE_NEON
+INSTANTIATE_TEST_CASE_P(
+ NEON, AverageTest,
+ ::testing::Values(
+ make_tuple(16, 16, 0, 8, &vp9_avg_8x8_neon),
+ make_tuple(16, 16, 5, 8, &vp9_avg_8x8_neon),
+ make_tuple(32, 32, 15, 8, &vp9_avg_8x8_neon)));
+
+#endif
+
} // namespace
int32_t lossless;
int32_t error_resilient;
int32_t frame_parallel;
+ vpx_color_space_t cs;
// TODO(JBB): quantizers / bitrate
};
const EncodeParameters kVP9EncodeParameterSet[] = {
- {0, 0, 0, 1, 0},
- {0, 0, 0, 0, 0},
- {0, 0, 1, 0, 0},
- {0, 2, 0, 0, 1},
+ {0, 0, 0, 1, 0, VPX_CS_BT_601},
+ {0, 0, 0, 0, 0, VPX_CS_BT_709},
+ {0, 0, 1, 0, 0, VPX_CS_BT_2020},
+ {0, 2, 0, 0, 1, VPX_CS_UNKNOWN},
// TODO(JBB): Test profiles (requires more work).
};
virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
::libvpx_test::Encoder *encoder) {
if (video->frame() == 1) {
+ encoder->Control(VP9E_SET_COLOR_SPACE, encode_parms.cs);
encoder->Control(VP9E_SET_LOSSLESS, encode_parms.lossless);
encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING,
encode_parms.frame_parallel);
EXPECT_EQ(common->frame_parallel_decoding_mode,
encode_parms.frame_parallel);
}
-
+ EXPECT_EQ(common->color_space, encode_parms.cs);
EXPECT_EQ(common->log2_tile_cols, encode_parms.tile_cols);
EXPECT_EQ(common->log2_tile_rows, encode_parms.tile_rows);
EncodeParameters encode_parms;
};
-TEST_P(Vp9EncoderParmsGetToDecoder, BitstreamParms) {
+// TODO(hkuang): This test conflicts with frame parallel decode, so it is
+// disabled until that is fixed.
+TEST_P(Vp9EncoderParmsGetToDecoder, DISABLED_BitstreamParms) {
init_flags_ = VPX_CODEC_USE_PSNR;
libvpx_test::VideoSource *video;
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string>
+#include <vector>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/i420_video_source.h"
+#include "test/util.h"
+#include "test/md5_helper.h"
+
+namespace {
+class VP9EncoderThreadTest
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<libvpx_test::TestMode, int> {
+ protected:
+ VP9EncoderThreadTest()
+ : EncoderTest(GET_PARAM(0)),
+ tiles_(2),
+ encoding_mode_(GET_PARAM(1)),
+ set_cpu_used_(GET_PARAM(2)) {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+ cfg.w = 1280;
+ cfg.h = 720;
+ decoder_ = codec_->CreateDecoder(cfg, 0);
+
+ md5_.clear();
+ }
+ virtual ~VP9EncoderThreadTest() {
+ delete decoder_;
+ }
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(encoding_mode_);
+
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ cfg_.g_lag_in_frames = 3;
+ cfg_.rc_end_usage = VPX_VBR;
+ cfg_.rc_2pass_vbr_minsection_pct = 5;
+ cfg_.rc_2pass_vbr_maxsection_pct = 2000;
+ } else {
+ cfg_.g_lag_in_frames = 0;
+ cfg_.rc_end_usage = VPX_CBR;
+ cfg_.g_error_resilient = 1;
+ }
+ cfg_.rc_max_quantizer = 56;
+ cfg_.rc_min_quantizer = 0;
+ }
+
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 0) {
+ // Encode 4 column tiles.
+ encoder->Control(VP9E_SET_TILE_COLUMNS, tiles_);
+ encoder->Control(VP8E_SET_CPUUSED, set_cpu_used_);
+ if (encoding_mode_ != ::libvpx_test::kRealTime) {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ } else {
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 0);
+ }
+ }
+ }
+
+ virtual void FramePktHook(const vpx_codec_cx_pkt_t *pkt) {
+ const vpx_codec_err_t res = decoder_->DecodeFrame(
+ reinterpret_cast<uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz);
+ if (res != VPX_CODEC_OK) {
+ abort_ = true;
+ ASSERT_EQ(VPX_CODEC_OK, res);
+ }
+ const vpx_image_t *img = decoder_->GetDxData().Next();
+
+ if (img) {
+ ::libvpx_test::MD5 md5_res;
+ md5_res.Add(img);
+ md5_.push_back(md5_res.Get());
+ }
+ }
+
+ int tiles_;
+ ::libvpx_test::TestMode encoding_mode_;
+ int set_cpu_used_;
+ ::libvpx_test::Decoder *decoder_;
+ std::vector<std::string> md5_;
+};
+
+TEST_P(VP9EncoderThreadTest, EncoderResultTest) {
+ std::vector<std::string> single_thr_md5, multi_thr_md5;
+
+ ::libvpx_test::I420VideoSource video("niklas_1280_720_30.yuv", 1280, 720,
+ 50, 1, 15, 20);
+
+ cfg_.rc_target_bitrate = 1000;
+
+ // Encode using single thread.
+ cfg_.g_threads = 1;
+ init_flags_ = VPX_CODEC_USE_PSNR;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ single_thr_md5 = md5_;
+ md5_.clear();
+
+ // Encode using multiple threads.
+ cfg_.g_threads = 4;
+ ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
+ multi_thr_md5 = md5_;
+ md5_.clear();
+
+ // Compare to check if two vectors are equal.
+ ASSERT_EQ(single_thr_md5, multi_thr_md5);
+}
+
+VP9_INSTANTIATE_TEST_CASE(
+ VP9EncoderThreadTest,
+ ::testing::Values(::libvpx_test::kTwoPassGood, ::libvpx_test::kOnePassGood,
+ ::libvpx_test::kRealTime),
+ ::testing::Range(1, 9));
+} // namespace
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstdio>
+#include <cstdlib>
+#include <string>
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "./vpx_config.h"
+#include "test/codec_factory.h"
+#include "test/decode_test_driver.h"
+#include "test/ivf_video_source.h"
+#include "test/md5_helper.h"
+#include "test/util.h"
+#if CONFIG_WEBM_IO
+#include "test/webm_video_source.h"
+#endif
+#include "vpx_mem/vpx_mem.h"
+
+namespace {
+
+using std::string;
+
+#if CONFIG_WEBM_IO
+
+struct FileList {
+ const char *name;
+ // md5 sum for decoded frames which does not include skipped frames.
+ const char *expected_md5;
+ const int pause_frame_num;
+};
+
+// Decodes |filename| with |num_threads|, pauses at the specified frame_num,
+// seeks to the next key frame and then continues decoding until the end.
+// Returns the md5 of the decoded frames, which does not include skipped frames.
+string DecodeFile(const string &filename, int num_threads, int pause_num) {
+ libvpx_test::WebMVideoSource video(filename);
+ video.Init();
+ int in_frames = 0;
+ int out_frames = 0;
+
+ vpx_codec_dec_cfg_t cfg = {0};
+ cfg.threads = num_threads;
+ vpx_codec_flags_t flags = 0;
+ flags |= VPX_CODEC_USE_FRAME_THREADING;
+ libvpx_test::VP9Decoder decoder(cfg, flags, 0);
+
+ libvpx_test::MD5 md5;
+ video.Begin();
+
+ do {
+ ++in_frames;
+ const vpx_codec_err_t res =
+ decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ if (res != VPX_CODEC_OK) {
+ EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+ break;
+ }
+
+ // Pause at specified frame number.
+ if (in_frames == pause_num) {
+ // Flush the decoder and then seek to next key frame.
+ decoder.DecodeFrame(NULL, 0);
+ video.SeekToNextKeyFrame();
+ } else {
+ video.Next();
+ }
+
+ // Flush the decoder at the end of the video.
+ if (!video.cxdata())
+ decoder.DecodeFrame(NULL, 0);
+
+ libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
+ const vpx_image_t *img;
+
+ // Get decompressed data
+ while ((img = dec_iter.Next())) {
+ ++out_frames;
+ md5.Add(img);
+ }
+ } while (video.cxdata() != NULL);
+
+ EXPECT_EQ(in_frames, out_frames) <<
+ "Input frame count does not match output frame count";
+
+ return string(md5.Get());
+}
+
+void DecodeFiles(const FileList files[]) {
+ for (const FileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5,
+ DecodeFile(iter->name, t, iter->pause_frame_num))
+ << "threads = " << t;
+ }
+ }
+}
+
+TEST(VP9MultiThreadedFrameParallel, PauseSeekResume) {
+ // vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
+ // one key frame for every ten frames.
+ static const FileList files[] = {
+ { "vp90-2-07-frame_parallel-1.webm",
+ "6ea7c3875d67252e7caf2bc6e75b36b1", 6},
+ { "vp90-2-07-frame_parallel-1.webm",
+ "4bb634160c7356a8d7d4299b6dc83a45", 12},
+ { "vp90-2-07-frame_parallel-1.webm",
+ "89772591e6ef461f9fa754f916c78ed8", 26},
+ { NULL, NULL, 0},
+ };
+ DecodeFiles(files);
+}
+
+struct InvalidFileList {
+ const char *name;
+ // md5 sum for decoded frames which does not include corrupted frames.
+ const char *expected_md5;
+ // Expected number of decoded frames which does not include corrupted frames.
+ const int expected_frame_count;
+};
+
+// Decodes |filename| with |num_threads| and returns the md5 of the decoded
+// frames, which does not include corrupted frames.
+string DecodeInvalidFile(const string &filename, int num_threads,
+ int expected_frame_count) {
+ libvpx_test::WebMVideoSource video(filename);
+ video.Init();
+
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+ cfg.threads = num_threads;
+ const vpx_codec_flags_t flags = VPX_CODEC_USE_FRAME_THREADING;
+ libvpx_test::VP9Decoder decoder(cfg, flags, 0);
+
+ libvpx_test::MD5 md5;
+ video.Begin();
+
+ int out_frames = 0;
+ do {
+ const vpx_codec_err_t res =
+ decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ // TODO(hkuang): frame parallel mode should return an error on corruption.
+ if (res != VPX_CODEC_OK) {
+ EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+ break;
+ }
+
+ video.Next();
+
+ // Flush the decoder at the end of the video.
+ if (!video.cxdata())
+ decoder.DecodeFrame(NULL, 0);
+
+ libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
+ const vpx_image_t *img;
+
+ // Get decompressed data
+ while ((img = dec_iter.Next())) {
+ ++out_frames;
+ md5.Add(img);
+ }
+ } while (video.cxdata() != NULL);
+
+ EXPECT_EQ(expected_frame_count, out_frames) <<
+ "Input frame count does not match expected output frame count";
+
+ return string(md5.Get());
+}
+
+void DecodeInvalidFiles(const InvalidFileList files[]) {
+ for (const InvalidFileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5,
+ DecodeInvalidFile(iter->name, t, iter->expected_frame_count))
+ << "threads = " << t;
+ }
+ }
+}
+
+TEST(VP9MultiThreadedFrameParallel, DISABLED_InvalidFileTest) {
+ static const InvalidFileList files[] = {
+ // invalid-vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
+ // one key frame for every ten frames. The 11th frame has corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-1.webm",
+ "0549d0f45f60deaef8eb708e6c0eb6cb", 30},
+ // invalid-vp90-2-07-frame_parallel-2.webm is a 40 frame video file with
+ // one key frame for every ten frames. The 1st and 31st frames have
+ // corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-2.webm",
+ "6a1f3cf6f9e7a364212fadb9580d525e", 20},
+ // invalid-vp90-2-07-frame_parallel-3.webm is a 40 frame video file with
+ // one key frame for every ten frames. The 5th and 13th frames have
+ // corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-3.webm",
+ "8256544308de926b0681e04685b98677", 27},
+ { NULL, NULL, 0},
+ };
+ DecodeInvalidFiles(files);
+}
+
+#endif // CONFIG_WEBM_IO
+} // namespace
const int16_t *round, const int16_t *quant,
const int16_t *quant_shift,
tran_low_t *qcoeff, tran_low_t *dqcoeff,
- const int16_t *dequant, int zbin_oq_value,
+ const int16_t *dequant,
uint16_t *eob, const int16_t *scan,
const int16_t *iscan);
typedef std::tr1::tuple<QuantizeFunc, QuantizeFunc, vpx_bit_depth_t>
TEST_P(VP9QuantizeTest, OperationCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- int zbin_oq_value = 0;
DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff_ptr, 256);
DECLARE_ALIGNED_ARRAY(16, int16_t, zbin_ptr, 2);
DECLARE_ALIGNED_ARRAY(16, int16_t, round_ptr, 2);
}
ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
- ref_dqcoeff_ptr, dequant_ptr, zbin_oq_value,
+ ref_dqcoeff_ptr, dequant_ptr,
ref_eob_ptr, scan_order->scan, scan_order->iscan);
ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, qcoeff_ptr,
- dqcoeff_ptr, dequant_ptr,
- zbin_oq_value, eob_ptr,
+ dqcoeff_ptr, dequant_ptr, eob_ptr,
scan_order->scan, scan_order->iscan));
for (int j = 0; j < sz; ++j) {
err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
TEST_P(VP9Quantize32Test, OperationCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- int zbin_oq_value = 0;
DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff_ptr, 1024);
DECLARE_ALIGNED_ARRAY(16, int16_t, zbin_ptr, 2);
DECLARE_ALIGNED_ARRAY(16, int16_t, round_ptr, 2);
}
ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
- ref_dqcoeff_ptr, dequant_ptr, zbin_oq_value,
+ ref_dqcoeff_ptr, dequant_ptr,
ref_eob_ptr, scan_order->scan, scan_order->iscan);
ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, qcoeff_ptr,
- dqcoeff_ptr, dequant_ptr,
- zbin_oq_value, eob_ptr,
+ dqcoeff_ptr, dequant_ptr, eob_ptr,
scan_order->scan, scan_order->iscan));
for (int j = 0; j < sz; ++j) {
err_count += (ref_qcoeff_ptr[j] != qcoeff_ptr[j]) |
TEST_P(VP9QuantizeTest, EOBCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- int zbin_oq_value = 0;
DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff_ptr, 256);
DECLARE_ALIGNED_ARRAY(16, int16_t, zbin_ptr, 2);
DECLARE_ALIGNED_ARRAY(16, int16_t, round_ptr, 2);
ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
- ref_dqcoeff_ptr, dequant_ptr, zbin_oq_value,
+ ref_dqcoeff_ptr, dequant_ptr,
ref_eob_ptr, scan_order->scan, scan_order->iscan);
ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, qcoeff_ptr,
- dqcoeff_ptr, dequant_ptr,
- zbin_oq_value, eob_ptr,
+ dqcoeff_ptr, dequant_ptr, eob_ptr,
scan_order->scan, scan_order->iscan));
for (int j = 0; j < sz; ++j) {
TEST_P(VP9Quantize32Test, EOBCheck) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
- int zbin_oq_value = 0;
DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff_ptr, 1024);
DECLARE_ALIGNED_ARRAY(16, int16_t, zbin_ptr, 2);
DECLARE_ALIGNED_ARRAY(16, int16_t, round_ptr, 2);
ref_quantize_op_(coeff_ptr, count, skip_block, zbin_ptr, round_ptr,
quant_ptr, quant_shift_ptr, ref_qcoeff_ptr,
- ref_dqcoeff_ptr, dequant_ptr, zbin_oq_value,
+ ref_dqcoeff_ptr, dequant_ptr,
ref_eob_ptr, scan_order->scan, scan_order->iscan);
ASM_REGISTER_STATE_CHECK(quantize_op_(coeff_ptr, count, skip_block,
zbin_ptr, round_ptr, quant_ptr,
quant_shift_ptr, qcoeff_ptr,
- dqcoeff_ptr, dequant_ptr,
- zbin_oq_value, eob_ptr,
+ dqcoeff_ptr, dequant_ptr, eob_ptr,
scan_order->scan, scan_order->iscan));
for (int j = 0; j < sz; ++j) {
}
}
+ void SeekToNextKeyFrame() {
+ ASSERT_TRUE(vpx_ctx_->file != NULL);
+ do {
+ const int status = webm_read_frame(webm_ctx_, &buf_, &buf_sz_, &buf_sz_);
+ ASSERT_GE(status, 0) << "webm_read_frame failed";
+ ++frame_;
+ if (status == 1) {
+ end_of_file_ = true;
+ }
+ } while (!webm_ctx_->is_key_frame && !end_of_file_);
+ }
+
virtual const uint8_t *cxdata() const {
return end_of_file_ ? NULL : buf_;
}
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,macho64
SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho32
+ SECTION .text align=%1
+ fakegot:
%elifidn __OUTPUT_FORMAT__,macho
SECTION .text align=%1
fakegot:
The available initialization methods are:
- \if encoder - #vpx_codec_enc_init (calls vpx_codec_enc_init_ver()) \endif
- \if multi-encoder - #vpx_codec_enc_init_multi (calls vpx_codec_enc_init_multi_ver()) \endif
+ \if encoder
+ - #vpx_codec_enc_init (calls vpx_codec_enc_init_ver())
+ - #vpx_codec_enc_init_multi (calls vpx_codec_enc_init_multi_ver())
+ .
+ \endif
\if decoder - #vpx_codec_dec_init (calls vpx_codec_dec_init_ver()) \endif
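To make the initialization flow above concrete, here is a minimal decoder-side sketch (not part of this change) that relies only on the public vpx/vpx_decoder.h and vpx/vp8dx.h API; the function name decode_one_frame and the data/data_sz inputs are illustrative placeholders, not names from this patch:

#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"  /* vpx_codec_vp9_dx() */

/* Minimal sketch: initialize a VP9 decoder via vpx_codec_dec_init()
 * (which expands to vpx_codec_dec_init_ver()), decode one compressed
 * frame, drain the decoded images, then tear down. `data`/`data_sz`
 * are assumed to hold one complete compressed frame obtained elsewhere. */
static int decode_one_frame(const uint8_t *data, unsigned int data_sz) {
  vpx_codec_ctx_t codec;
  vpx_codec_iter_t iter = NULL;
  const vpx_image_t *img;

  if (vpx_codec_dec_init(&codec, vpx_codec_vp9_dx(), NULL, 0))
    return -1;

  if (vpx_codec_decode(&codec, data, data_sz, NULL, 0)) {
    vpx_codec_destroy(&codec);
    return -1;
  }

  while ((img = vpx_codec_get_frame(&codec, &iter)) != NULL) {
    /* consume the decoded image (img->d_w x img->d_h) */
  }

  return vpx_codec_destroy(&codec) ? -1 : 0;
}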
#include "vpx_config.h"
#include "./vpx_scale_rtcd.h"
+#include "./vp8_rtcd.h"
#include "vp8/common/onyxc_int.h"
#include "vp8/common/blockd.h"
#include "onyx_int.h"
reset_temporal_layer_change(cpi, oxcf, prev_number_of_layers);
}
+ if (!cpi->initial_width)
+ {
+ cpi->initial_width = cpi->oxcf.Width;
+ cpi->initial_height = cpi->oxcf.Height;
+ }
+
cm->Width = cpi->oxcf.Width;
cm->Height = cpi->oxcf.Height;
+ assert(cm->Width <= cpi->initial_width);
+ assert(cm->Height <= cpi->initial_height);
/* TODO(jkoleszar): if an internal spatial resampling is active,
* and we downsize the input image, maybe we should clear the
int droppable;
+ int initial_width;
+ int initial_height;
+
#if CONFIG_TEMPORAL_DENOISING
VP8_DENOISER denoiser;
#endif
int ref_frame_map[4];
int sign_bias = 0;
int dot_artifact_candidate = 0;
- // For detecting dot artifact.
- unsigned char* target = x->src.y_buffer;
- unsigned char* target_u = x->block[16].src + *x->block[16].base_src;
- unsigned char* target_v = x->block[20].src + *x->block[20].base_src;
- int stride = x->src.y_stride;
- int stride_uv = x->block[16].src_stride;
+ get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
+
+ // If the current frame uses LAST as a reference, check whether to bias
+ // the mode selection to suppress dot artifacts.
+ if (cpi->ref_frame_flags & VP8_LAST_FRAME) {
+ unsigned char* target_y = x->src.y_buffer;
+ unsigned char* target_u = x->block[16].src + *x->block[16].base_src;
+ unsigned char* target_v = x->block[20].src + *x->block[20].base_src;
+ int stride = x->src.y_stride;
+ int stride_uv = x->block[16].src_stride;
#if CONFIG_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity) {
- int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
- target =
- cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
- stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
- if (uv_denoise) {
- target_u =
- cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer + recon_uvoffset;
- target_v =
- cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer + recon_uvoffset;
- stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
+ if (cpi->oxcf.noise_sensitivity) {
+ const int uv_denoise = (cpi->oxcf.noise_sensitivity >= 2) ? 1 : 0;
+ target_y =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset;
+ stride = cpi->denoiser.yv12_running_avg[LAST_FRAME].y_stride;
+ if (uv_denoise) {
+ target_u =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].u_buffer +
+ recon_uvoffset;
+ target_v =
+ cpi->denoiser.yv12_running_avg[LAST_FRAME].v_buffer +
+ recon_uvoffset;
+ stride_uv = cpi->denoiser.yv12_running_avg[LAST_FRAME].uv_stride;
+ }
}
- }
#endif
-
- get_predictor_pointers(cpi, plane, recon_yoffset, recon_uvoffset);
-
- dot_artifact_candidate =
- check_dot_artifact_candidate(cpi, x,
- target, stride,
- plane[LAST_FRAME][0], mb_row, mb_col, 0);
- // If not found in Y channel, check UV channel.
- if (!dot_artifact_candidate) {
dot_artifact_candidate =
- check_dot_artifact_candidate(cpi, x,
- target_u, stride_uv,
- plane[LAST_FRAME][1], mb_row, mb_col, 1);
+ check_dot_artifact_candidate(cpi, x, target_y, stride,
+ plane[LAST_FRAME][0], mb_row, mb_col, 0);
+ // If not found in Y channel, check UV channel.
if (!dot_artifact_candidate) {
dot_artifact_candidate =
- check_dot_artifact_candidate(cpi, x,
- target_v, stride_uv,
- plane[LAST_FRAME][2], mb_row, mb_col, 2);
+ check_dot_artifact_candidate(cpi, x, target_u, stride_uv,
+ plane[LAST_FRAME][1], mb_row, mb_col, 1);
+ if (!dot_artifact_candidate) {
+ dot_artifact_candidate =
+ check_dot_artifact_candidate(cpi, x, target_v, stride_uv,
+ plane[LAST_FRAME][2], mb_row, mb_col, 2);
+ }
}
}
{
vpx_codec_err_t res;
- if (((cfg->g_w != ctx->cfg.g_w) || (cfg->g_h != ctx->cfg.g_h))
- && (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS))
- ERROR("Cannot change width or height after initialization");
+ if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h)
+ {
+ if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS)
+ ERROR("Cannot change width or height after initialization");
+ if ((ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) ||
+ (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height))
+ ERROR("Cannot increast width or height larger than their initial values");
+ }
/* Prevent increasing lag_in_frames. This check is stricter than it needs
* to be -- the limit is not increasing past the first lag_in_frames
/* vet via sync code */
if (clear[3] != 0x9d || clear[4] != 0x01 || clear[5] != 0x2a)
- res = VPX_CODEC_UNSUP_BITSTREAM;
+ return VPX_CODEC_UNSUP_BITSTREAM;
si->w = (clear[6] | (clear[7] << 8)) & 0x3fff;
si->h = (clear[8] | (clear[9] << 8)) & 0x3fff;
if (!res)
{
VP8D_COMP *pbi = ctx->yv12_frame_buffers.pbi[0];
- if(resolution_change)
+ if (resolution_change)
{
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
return;
}
-#if !HAVE_NEON_ASM
void vp9_lpf_horizontal_4_dual_neon(uint8_t *s, int p /* pitch */,
const uint8_t *blimit0,
const uint8_t *limit0,
vst1q_u8(s, q8u8);
return;
}
-#endif // !HAVE_NEON_ASM
-
-void vp9_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */,
- const uint8_t *blimit0,
- const uint8_t *limit0,
- const uint8_t *thresh0,
- const uint8_t *blimit1,
- const uint8_t *limit1,
- const uint8_t *thresh1) {
- vp9_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1);
- vp9_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1);
-}
-
-void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int p,
- const uint8_t *blimit0,
- const uint8_t *limit0,
- const uint8_t *thresh0,
- const uint8_t *blimit1,
- const uint8_t *limit1,
- const uint8_t *thresh1) {
- vp9_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1);
- vp9_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
-}
-
-void vp9_lpf_vertical_8_dual_neon(uint8_t *s, int p,
- const uint8_t *blimit0,
- const uint8_t *limit0,
- const uint8_t *thresh0,
- const uint8_t *blimit1,
- const uint8_t *limit1,
- const uint8_t *thresh1) {
- vp9_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1);
- vp9_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
-}
-
-#if HAVE_NEON_ASM
-void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int p,
- const uint8_t *blimit,
- const uint8_t *limit,
- const uint8_t *thresh) {
- vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
- vp9_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
-}
-#endif // HAVE_NEON_ASM
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+
+static INLINE void vp9_loop_filter_neon(
+ uint8x8_t dblimit, // flimit
+ uint8x8_t dlimit, // limit
+ uint8x8_t dthresh, // thresh
+ uint8x8_t d3u8, // p3
+ uint8x8_t d4u8, // p2
+ uint8x8_t d5u8, // p1
+ uint8x8_t d6u8, // p0
+ uint8x8_t d7u8, // q0
+ uint8x8_t d16u8, // q1
+ uint8x8_t d17u8, // q2
+ uint8x8_t d18u8, // q3
+ uint8x8_t *d4ru8, // p1
+ uint8x8_t *d5ru8, // p0
+ uint8x8_t *d6ru8, // q0
+ uint8x8_t *d7ru8) { // q1
+ uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8;
+ int16x8_t q12s16;
+ int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8;
+
+ d19u8 = vabd_u8(d3u8, d4u8);
+ d20u8 = vabd_u8(d4u8, d5u8);
+ d21u8 = vabd_u8(d5u8, d6u8);
+ d22u8 = vabd_u8(d16u8, d7u8);
+ d3u8 = vabd_u8(d17u8, d16u8);
+ d4u8 = vabd_u8(d18u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+ d20u8 = vmax_u8(d21u8, d22u8);
+ d3u8 = vmax_u8(d3u8, d4u8);
+ d23u8 = vmax_u8(d19u8, d20u8);
+
+ d17u8 = vabd_u8(d6u8, d7u8);
+
+ d21u8 = vcgt_u8(d21u8, dthresh);
+ d22u8 = vcgt_u8(d22u8, dthresh);
+ d23u8 = vmax_u8(d23u8, d3u8);
+
+ d28u8 = vabd_u8(d5u8, d16u8);
+ d17u8 = vqadd_u8(d17u8, d17u8);
+
+ d23u8 = vcge_u8(dlimit, d23u8);
+
+ d18u8 = vdup_n_u8(0x80);
+ d5u8 = veor_u8(d5u8, d18u8);
+ d6u8 = veor_u8(d6u8, d18u8);
+ d7u8 = veor_u8(d7u8, d18u8);
+ d16u8 = veor_u8(d16u8, d18u8);
+
+ d28u8 = vshr_n_u8(d28u8, 1);
+ d17u8 = vqadd_u8(d17u8, d28u8);
+
+ d19u8 = vdup_n_u8(3);
+
+ d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8),
+ vreinterpret_s8_u8(d6u8));
+
+ d17u8 = vcge_u8(dblimit, d17u8);
+
+ d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8),
+ vreinterpret_s8_u8(d16u8));
+
+ d22u8 = vorr_u8(d21u8, d22u8);
+
+ q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8));
+
+ d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8);
+ d23u8 = vand_u8(d23u8, d17u8);
+
+ q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8));
+
+ d17u8 = vdup_n_u8(4);
+
+ d27s8 = vqmovn_s16(q12s16);
+ d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8);
+ d27s8 = vreinterpret_s8_u8(d27u8);
+
+ d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8));
+ d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8));
+ d28s8 = vshr_n_s8(d28s8, 3);
+ d27s8 = vshr_n_s8(d27s8, 3);
+
+ d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8);
+ d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8);
+
+ d27s8 = vrshr_n_s8(d27s8, 1);
+ d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8));
+
+ d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8);
+ d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8);
+
+ *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8);
+ *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8);
+ *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8);
+ *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8);
+ return;
+}
+
+void vp9_lpf_horizontal_4_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s, *psrc;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+
+ if (count == 0) // end_vp9_lf_h_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ psrc = src - (pitch << 2);
+ for (i = 0; i < count; i++) {
+ s = psrc + i * 8;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ vp9_loop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d4u8, &d5u8, &d6u8, &d7u8);
+
+ s -= (pitch * 5);
+ vst1_u8(s, d4u8);
+ s += pitch;
+ vst1_u8(s, d5u8);
+ s += pitch;
+ vst1_u8(s, d6u8);
+ s += pitch;
+ vst1_u8(s, d7u8);
+ }
+ return;
+}
+
+void vp9_lpf_vertical_4_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i, pitch8;
+ uint8_t *s;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
+ uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+ uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+ uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+ uint8x8x4_t d4Result;
+
+ if (count == 0) // end_vp9_lf_v_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ pitch8 = pitch * 8;
+ for (i = 0; i < count; i++, src += pitch8) {
+ s = src - (i + 1) * 4;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
+ vreinterpret_u32_u8(d7u8));
+ d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
+ vreinterpret_u32_u8(d16u8));
+ d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
+ vreinterpret_u32_u8(d17u8));
+ d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
+ vreinterpret_u32_u8(d18u8));
+
+ d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+ vreinterpret_u16_u32(d2tmp2.val[0]));
+ d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+ vreinterpret_u16_u32(d2tmp3.val[0]));
+ d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+ vreinterpret_u16_u32(d2tmp2.val[1]));
+ d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+ vreinterpret_u16_u32(d2tmp3.val[1]));
+
+ d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+ vreinterpret_u8_u16(d2tmp5.val[0]));
+ d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+ vreinterpret_u8_u16(d2tmp5.val[1]));
+ d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+ vreinterpret_u8_u16(d2tmp7.val[0]));
+ d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+ vreinterpret_u8_u16(d2tmp7.val[1]));
+
+ d3u8 = d2tmp8.val[0];
+ d4u8 = d2tmp8.val[1];
+ d5u8 = d2tmp9.val[0];
+ d6u8 = d2tmp9.val[1];
+ d7u8 = d2tmp10.val[0];
+ d16u8 = d2tmp10.val[1];
+ d17u8 = d2tmp11.val[0];
+ d18u8 = d2tmp11.val[1];
+
+ vp9_loop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d4u8, &d5u8, &d6u8, &d7u8);
+
+ d4Result.val[0] = d4u8;
+ d4Result.val[1] = d5u8;
+ d4Result.val[2] = d6u8;
+ d4Result.val[3] = d7u8;
+
+ src -= 2;
+ vst4_lane_u8(src, d4Result, 0);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 1);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 2);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 3);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 4);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 5);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 6);
+ src += pitch;
+ vst4_lane_u8(src, d4Result, 7);
+ }
+ return;
+}
--- /dev/null
+;
+; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+ EXPORT |vp9_lpf_horizontal_4_neon|
+ EXPORT |vp9_lpf_vertical_4_neon|
+ ARM
+
+ AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; Currently vp9 only works on 8 iterations at a time. The vp8 loop filter
+; works on 16 iterations at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_lpf_horizontal_4_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_horizontal_4_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ ldr r2, [sp, #4] ; load thresh
+ add r1, r1, r1 ; double pitch
+
+ cmp r12, #0
+ beq end_vp9_lf_h_edge
+
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+ vld1.8 {d2[]}, [r2] ; duplicate *thresh
+
+count_lf_h_loop
+ sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines
+ add r3, r2, r1, lsr #1 ; set to 3 lines down
+
+ vld1.u8 {d3}, [r2@64], r1 ; p3
+ vld1.u8 {d4}, [r3@64], r1 ; p2
+ vld1.u8 {d5}, [r2@64], r1 ; p1
+ vld1.u8 {d6}, [r3@64], r1 ; p0
+ vld1.u8 {d7}, [r2@64], r1 ; q0
+ vld1.u8 {d16}, [r3@64], r1 ; q1
+ vld1.u8 {d17}, [r2@64] ; q2
+ vld1.u8 {d18}, [r3@64] ; q3
+
+ sub r2, r2, r1, lsl #1
+ sub r3, r3, r1, lsl #1
+
+ bl vp9_loop_filter_neon
+
+ vst1.u8 {d4}, [r2@64], r1 ; store op1
+ vst1.u8 {d5}, [r3@64], r1 ; store op0
+ vst1.u8 {d6}, [r2@64], r1 ; store oq0
+ vst1.u8 {d7}, [r3@64], r1 ; store oq1
+
+ add r0, r0, #8
+ subs r12, r12, #1
+ bne count_lf_h_loop
+
+end_vp9_lf_h_edge
+ pop {pc}
+ ENDP ; |vp9_lpf_horizontal_4_neon|
+
+; Currently vp9 only works on 8 iterations at a time. The vp8 loop filter
+; works on 16 iterations at a time.
+; TODO(fgalligan): See about removing the count code as this function is only
+; called with a count of 1.
+;
+; void vp9_lpf_vertical_4_neon(uint8_t *s,
+; int p /* pitch */,
+; const uint8_t *blimit,
+; const uint8_t *limit,
+; const uint8_t *thresh,
+; int count)
+;
+; r0 uint8_t *s,
+; r1 int p, /* pitch */
+; r2 const uint8_t *blimit,
+; r3 const uint8_t *limit,
+; sp const uint8_t *thresh,
+; sp+4 int count
+|vp9_lpf_vertical_4_neon| PROC
+ push {lr}
+
+ vld1.8 {d0[]}, [r2] ; duplicate *blimit
+ ldr r12, [sp, #8] ; load count
+ vld1.8 {d1[]}, [r3] ; duplicate *limit
+
+ ldr r3, [sp, #4] ; load thresh
+ sub r2, r0, #4 ; move s pointer down by 4 columns
+ cmp r12, #0
+ beq end_vp9_lf_v_edge
+
+ vld1.8 {d2[]}, [r3] ; duplicate *thresh
+
+count_lf_v_loop
+ vld1.u8 {d3}, [r2], r1 ; load s data
+ vld1.u8 {d4}, [r2], r1
+ vld1.u8 {d5}, [r2], r1
+ vld1.u8 {d6}, [r2], r1
+ vld1.u8 {d7}, [r2], r1
+ vld1.u8 {d16}, [r2], r1
+ vld1.u8 {d17}, [r2], r1
+ vld1.u8 {d18}, [r2]
+
+ ;transpose to 8x8 matrix
+ vtrn.32 d3, d7
+ vtrn.32 d4, d16
+ vtrn.32 d5, d17
+ vtrn.32 d6, d18
+
+ vtrn.16 d3, d5
+ vtrn.16 d4, d6
+ vtrn.16 d7, d17
+ vtrn.16 d16, d18
+
+ vtrn.8 d3, d4
+ vtrn.8 d5, d6
+ vtrn.8 d7, d16
+ vtrn.8 d17, d18
+
+ bl vp9_loop_filter_neon
+
+ sub r0, r0, #2
+
+ ;store op1, op0, oq0, oq1
+ vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
+ vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
+ vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
+ vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
+ vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
+ vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
+ vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
+ vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
+
+ add r0, r0, r1, lsl #3 ; s += pitch * 8
+ subs r12, r12, #1
+ subne r2, r0, #4 ; move s pointer down by 4 columns
+ bne count_lf_v_loop
+
+end_vp9_lf_v_edge
+ pop {pc}
+ ENDP ; |vp9_lpf_vertical_4_neon|
+
+; void vp9_loop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do the
+; necessary load, transpose (if necessary) and store. The function does not use
+; registers d8-d15.
+;
+; Inputs:
+; r0-r3, r12 PRESERVE
+; d0 blimit
+; d1 limit
+; d2 thresh
+; d3 p3
+; d4 p2
+; d5 p1
+; d6 p0
+; d7 q0
+; d16 q1
+; d17 q2
+; d18 q3
+;
+; Outputs:
+; d4 op1
+; d5 op0
+; d6 oq0
+; d7 oq1
+|vp9_loop_filter_neon| PROC
+ ; filter_mask
+ vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
+ vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
+ vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
+ vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
+ vabd.u8 d3, d17, d16 ; m5 = abs(q2 - q1)
+ vabd.u8 d4, d18, d17 ; m6 = abs(q3 - q2)
+
+ ; only compare the largest value to limit
+ vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
+ vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
+
+ vabd.u8 d17, d6, d7 ; abs(p0 - q0)
+
+ vmax.u8 d3, d3, d4 ; m3 = max(m5, m6)
+
+ vmov.u8 d18, #0x80
+
+ vmax.u8 d23, d19, d20 ; m1 = max(m1, m2)
+
+ ; hevmask
+ vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
+ vcgt.u8 d22, d22, d2 ; (abs(q1 - q0) > thresh)*-1
+ vmax.u8 d23, d23, d3 ; m1 = max(m1, m3)
+
+ vabd.u8 d28, d5, d16 ; a = abs(p1 - q1)
+ vqadd.u8 d17, d17, d17 ; b = abs(p0 - q0) * 2
+
+ veor d7, d7, d18 ; qs0
+
+ vcge.u8 d23, d1, d23 ; abs(m1) > limit
+
+ ; filter() function
+ ; convert to signed
+
+ vshr.u8 d28, d28, #1 ; a = a / 2
+ veor d6, d6, d18 ; ps0
+
+ veor d5, d5, d18 ; ps1
+ vqadd.u8 d17, d17, d28 ; a = b + a
+
+ veor d16, d16, d18 ; qs1
+
+ vmov.u8 d19, #3
+
+ vsub.s8 d28, d7, d6 ; ( qs0 - ps0)
+
+ vcge.u8 d17, d0, d17 ; a > blimit
+
+ vqsub.s8 d27, d5, d16 ; filter = clamp(ps1-qs1)
+ vorr d22, d21, d22 ; hevmask
+
+ vmull.s8 q12, d28, d19 ; 3 * ( qs0 - ps0)
+
+ vand d27, d27, d22 ; filter &= hev
+ vand d23, d23, d17 ; filter_mask
+
+ vaddw.s8 q12, q12, d27 ; filter + 3 * (qs0 - ps0)
+
+ vmov.u8 d17, #4
+
+ ; filter = clamp(filter + 3 * ( qs0 - ps0))
+ vqmovn.s16 d27, q12
+
+ vand d27, d27, d23 ; filter &= mask
+
+ vqadd.s8 d28, d27, d19 ; filter2 = clamp(filter+3)
+ vqadd.s8 d27, d27, d17 ; filter1 = clamp(filter+4)
+ vshr.s8 d28, d28, #3 ; filter2 >>= 3
+ vshr.s8 d27, d27, #3 ; filter1 >>= 3
+
+ vqadd.s8 d19, d6, d28 ; u = clamp(ps0 + filter2)
+ vqsub.s8 d26, d7, d27 ; u = clamp(qs0 - filter1)
+
+ ; outer tap adjustments
+ vrshr.s8 d27, d27, #1 ; filter = ++filter1 >> 1
+
+ veor d6, d26, d18 ; *oq0 = u^0x80
+
+ vbic d27, d27, d22 ; filter &= ~hev
+
+ vqadd.s8 d21, d5, d27 ; u = clamp(ps1 + filter)
+ vqsub.s8 d20, d16, d27 ; u = clamp(qs1 - filter)
+
+ veor d5, d19, d18 ; *op0 = u^0x80
+ veor d4, d21, d18 ; *op1 = u^0x80
+ veor d7, d20, d18 ; *oq1 = u^0x80
+
+ bx lr
+ ENDP ; |vp9_loop_filter_neon|
+
+ END
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+#include "./vpx_config.h"
+
+static INLINE void vp9_mbloop_filter_neon(
+ uint8x8_t dblimit, // mblimit
+ uint8x8_t dlimit, // limit
+ uint8x8_t dthresh, // thresh
+ uint8x8_t d3u8, // p3
+ uint8x8_t d4u8, // p2
+ uint8x8_t d5u8, // p1
+ uint8x8_t d6u8, // p0
+ uint8x8_t d7u8, // q0
+ uint8x8_t d16u8, // q1
+ uint8x8_t d17u8, // q2
+ uint8x8_t d18u8, // q3
+ uint8x8_t *d0ru8, // p2
+ uint8x8_t *d1ru8, // p1
+ uint8x8_t *d2ru8, // p0
+ uint8x8_t *d3ru8, // q0
+ uint8x8_t *d4ru8, // q1
+ uint8x8_t *d5ru8) { // q2
+ uint32_t flat;
+ uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
+ uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
+ int16x8_t q15s16;
+ uint16x8_t q10u16, q14u16;
+ int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;
+
+ d19u8 = vabd_u8(d3u8, d4u8);
+ d20u8 = vabd_u8(d4u8, d5u8);
+ d21u8 = vabd_u8(d5u8, d6u8);
+ d22u8 = vabd_u8(d16u8, d7u8);
+ d23u8 = vabd_u8(d17u8, d16u8);
+ d24u8 = vabd_u8(d18u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+ d20u8 = vmax_u8(d21u8, d22u8);
+
+ d25u8 = vabd_u8(d6u8, d4u8);
+
+ d23u8 = vmax_u8(d23u8, d24u8);
+
+ d26u8 = vabd_u8(d7u8, d17u8);
+
+ d19u8 = vmax_u8(d19u8, d20u8);
+
+ d24u8 = vabd_u8(d6u8, d7u8);
+ d27u8 = vabd_u8(d3u8, d6u8);
+ d28u8 = vabd_u8(d18u8, d7u8);
+
+ d19u8 = vmax_u8(d19u8, d23u8);
+
+ d23u8 = vabd_u8(d5u8, d16u8);
+ d24u8 = vqadd_u8(d24u8, d24u8);
+
+ d19u8 = vcge_u8(dlimit, d19u8);
+
+ d25u8 = vmax_u8(d25u8, d26u8);
+ d26u8 = vmax_u8(d27u8, d28u8);
+
+ d23u8 = vshr_n_u8(d23u8, 1);
+
+ d25u8 = vmax_u8(d25u8, d26u8);
+
+ d24u8 = vqadd_u8(d24u8, d23u8);
+
+ d20u8 = vmax_u8(d20u8, d25u8);
+
+ d23u8 = vdup_n_u8(1);
+ d24u8 = vcge_u8(dblimit, d24u8);
+
+ d21u8 = vcgt_u8(d21u8, dthresh);
+
+ d20u8 = vcge_u8(d23u8, d20u8);
+
+ d19u8 = vand_u8(d19u8, d24u8);
+
+ d23u8 = vcgt_u8(d22u8, dthresh);
+
+ d20u8 = vand_u8(d20u8, d19u8);
+
+ d22u8 = vdup_n_u8(0x80);
+
+ d23u8 = vorr_u8(d21u8, d23u8);
+
+ q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8),
+ vreinterpret_u16_u8(d21u8));
+
+ d30u8 = vshrn_n_u16(q10u16, 4);
+ flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);
+
+ if (flat == 0xffffffff) { // Check for all 1's, power_branch_only
+ d27u8 = vdup_n_u8(3);
+ d21u8 = vdup_n_u8(2);
+ q14u16 = vaddl_u8(d6u8, d7u8);
+ q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+ q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ *d0ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+ *d1ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ *d2ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d3ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vsubw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d4ru8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vsubw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+ *d5ru8 = vqrshrn_n_u16(q14u16, 3);
+ } else {
+ d21u8 = veor_u8(d7u8, d22u8);
+ d24u8 = veor_u8(d6u8, d22u8);
+ d25u8 = veor_u8(d5u8, d22u8);
+ d26u8 = veor_u8(d16u8, d22u8);
+
+ d27u8 = vdup_n_u8(3);
+
+ d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
+ d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
+
+ q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
+
+ d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
+
+ q15s16 = vaddw_s8(q15s16, d29s8);
+
+ d29u8 = vdup_n_u8(4);
+
+ d28s8 = vqmovn_s16(q15s16);
+
+ d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));
+
+ d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
+ d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
+ d30s8 = vshr_n_s8(d30s8, 3);
+ d29s8 = vshr_n_s8(d29s8, 3);
+
+ d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
+ d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);
+
+ d29s8 = vrshr_n_s8(d29s8, 1);
+ d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));
+
+ d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
+ d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);
+
+ if (flat == 0) { // filter_branch_only
+ *d0ru8 = d4u8;
+ *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+ *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+ *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+ *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+ *d5ru8 = d17u8;
+ return;
+ }
+
+ d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
+ d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
+ d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
+ d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
+
+ d23u8 = vdup_n_u8(2);
+ q14u16 = vaddl_u8(d6u8, d7u8);
+ q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
+ q14u16 = vmlal_u8(q14u16, d4u8, d23u8);
+
+ d0u8 = vbsl_u8(d20u8, dblimit, d4u8);
+
+ q14u16 = vaddw_u8(q14u16, d5u8);
+
+ d1u8 = vbsl_u8(d20u8, dlimit, d25u8);
+
+ d30u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vaddw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+
+ d2u8 = vbsl_u8(d20u8, dthresh, d24u8);
+
+ d31u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vaddw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+
+ *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);
+
+ d23u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d3u8);
+ q14u16 = vsubw_u8(q14u16, d6u8);
+ q14u16 = vaddw_u8(q14u16, d7u8);
+
+ *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);
+
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);
+
+ d22u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d4u8);
+ q14u16 = vsubw_u8(q14u16, d7u8);
+ q14u16 = vaddw_u8(q14u16, d16u8);
+
+ d3u8 = vbsl_u8(d20u8, d3u8, d21u8);
+
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ d4u8 = vbsl_u8(d20u8, d4u8, d26u8);
+
+ d6u8 = vqrshrn_n_u16(q14u16, 3);
+
+ q14u16 = vsubw_u8(q14u16, d5u8);
+ q14u16 = vsubw_u8(q14u16, d16u8);
+ q14u16 = vaddw_u8(q14u16, d17u8);
+ q14u16 = vaddw_u8(q14u16, d18u8);
+
+ d5u8 = vbsl_u8(d20u8, d5u8, d17u8);
+
+ d7u8 = vqrshrn_n_u16(q14u16, 3);
+
+ *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
+ *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
+ *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
+ }
+ return;
+}
+
+void vp9_lpf_horizontal_8_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s, *psrc;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+ uint8x8_t d16u8, d17u8, d18u8;
+
+ if (count == 0) // end_vp9_mblf_h_edge
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ psrc = src - (pitch << 2);
+ for (i = 0; i < count; i++) {
+ s = psrc + i * 8;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
+
+ s -= (pitch * 6);
+ vst1_u8(s, d0u8);
+ s += pitch;
+ vst1_u8(s, d1u8);
+ s += pitch;
+ vst1_u8(s, d2u8);
+ s += pitch;
+ vst1_u8(s, d3u8);
+ s += pitch;
+ vst1_u8(s, d4u8);
+ s += pitch;
+ vst1_u8(s, d5u8);
+ }
+ return;
+}
+
+void vp9_lpf_vertical_8_neon(
+ unsigned char *src,
+ int pitch,
+ unsigned char *blimit,
+ unsigned char *limit,
+ unsigned char *thresh,
+ int count) {
+ int i;
+ uint8_t *s;
+ uint8x8_t dblimit, dlimit, dthresh;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+ uint8x8_t d16u8, d17u8, d18u8;
+ uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
+ uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
+ uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
+ uint8x8x4_t d4Result;
+ uint8x8x2_t d2Result;
+
+ if (count == 0)
+ return;
+
+ dblimit = vld1_u8(blimit);
+ dlimit = vld1_u8(limit);
+ dthresh = vld1_u8(thresh);
+
+ for (i = 0; i < count; i++) {
+ s = src + (i * (pitch << 3)) - 4;
+
+ d3u8 = vld1_u8(s);
+ s += pitch;
+ d4u8 = vld1_u8(s);
+ s += pitch;
+ d5u8 = vld1_u8(s);
+ s += pitch;
+ d6u8 = vld1_u8(s);
+ s += pitch;
+ d7u8 = vld1_u8(s);
+ s += pitch;
+ d16u8 = vld1_u8(s);
+ s += pitch;
+ d17u8 = vld1_u8(s);
+ s += pitch;
+ d18u8 = vld1_u8(s);
+
+ d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
+ vreinterpret_u32_u8(d7u8));
+ d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
+ vreinterpret_u32_u8(d16u8));
+ d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
+ vreinterpret_u32_u8(d17u8));
+ d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
+ vreinterpret_u32_u8(d18u8));
+
+ d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
+ vreinterpret_u16_u32(d2tmp2.val[0]));
+ d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
+ vreinterpret_u16_u32(d2tmp3.val[0]));
+ d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
+ vreinterpret_u16_u32(d2tmp2.val[1]));
+ d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
+ vreinterpret_u16_u32(d2tmp3.val[1]));
+
+ d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
+ vreinterpret_u8_u16(d2tmp5.val[0]));
+ d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
+ vreinterpret_u8_u16(d2tmp5.val[1]));
+ d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
+ vreinterpret_u8_u16(d2tmp7.val[0]));
+ d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
+ vreinterpret_u8_u16(d2tmp7.val[1]));
+
+ d3u8 = d2tmp8.val[0];
+ d4u8 = d2tmp8.val[1];
+ d5u8 = d2tmp9.val[0];
+ d6u8 = d2tmp9.val[1];
+ d7u8 = d2tmp10.val[0];
+ d16u8 = d2tmp10.val[1];
+ d17u8 = d2tmp11.val[0];
+ d18u8 = d2tmp11.val[1];
+
+ vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
+ d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
+ &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
+
+ d4Result.val[0] = d0u8;
+ d4Result.val[1] = d1u8;
+ d4Result.val[2] = d2u8;
+ d4Result.val[3] = d3u8;
+
+ d2Result.val[0] = d4u8;
+ d2Result.val[1] = d5u8;
+
+ s = src - 3;
+ vst4_lane_u8(s, d4Result, 0);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 1);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 2);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 3);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 4);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 5);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 6);
+ s += pitch;
+ vst4_lane_u8(s, d4Result, 7);
+
+ s = src + 1;
+ vst2_lane_u8(s, d2Result, 0);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 1);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 2);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 3);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 4);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 5);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 6);
+ s += pitch;
+ vst2_lane_u8(s, d2Result, 7);
+ }
+ return;
+}
; be found in the AUTHORS file in the root of the source tree.
;
- EXPORT |vp9_lpf_horizontal_4_neon|
- EXPORT |vp9_lpf_vertical_4_neon|
EXPORT |vp9_lpf_horizontal_8_neon|
EXPORT |vp9_lpf_vertical_8_neon|
ARM
; TODO(fgalligan): See about removing the count code as this function is only
; called with a count of 1.
;
-; void vp9_lpf_horizontal_4_neon(uint8_t *s,
-; int p /* pitch */,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-;
-; r0 uint8_t *s,
-; r1 int p, /* pitch */
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_horizontal_4_neon| PROC
- push {lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #8] ; load count
- ldr r2, [sp, #4] ; load thresh
- add r1, r1, r1 ; double pitch
-
- cmp r12, #0
- beq end_vp9_lf_h_edge
-
- vld1.8 {d1[]}, [r3] ; duplicate *limit
- vld1.8 {d2[]}, [r2] ; duplicate *thresh
-
-count_lf_h_loop
- sub r2, r0, r1, lsl #1 ; move src pointer down by 4 lines
- add r3, r2, r1, lsr #1 ; set to 3 lines down
-
- vld1.u8 {d3}, [r2@64], r1 ; p3
- vld1.u8 {d4}, [r3@64], r1 ; p2
- vld1.u8 {d5}, [r2@64], r1 ; p1
- vld1.u8 {d6}, [r3@64], r1 ; p0
- vld1.u8 {d7}, [r2@64], r1 ; q0
- vld1.u8 {d16}, [r3@64], r1 ; q1
- vld1.u8 {d17}, [r2@64] ; q2
- vld1.u8 {d18}, [r3@64] ; q3
-
- sub r2, r2, r1, lsl #1
- sub r3, r3, r1, lsl #1
-
- bl vp9_loop_filter_neon
-
- vst1.u8 {d4}, [r2@64], r1 ; store op1
- vst1.u8 {d5}, [r3@64], r1 ; store op0
- vst1.u8 {d6}, [r2@64], r1 ; store oq0
- vst1.u8 {d7}, [r3@64], r1 ; store oq1
-
- add r0, r0, #8
- subs r12, r12, #1
- bne count_lf_h_loop
-
-end_vp9_lf_h_edge
- pop {pc}
- ENDP ; |vp9_lpf_horizontal_4_neon|
-
-; Currently vp9 only works on iterations 8 at a time. The vp8 loop filter
-; works on 16 iterations at a time.
-; TODO(fgalligan): See about removing the count code as this function is only
-; called with a count of 1.
-;
-; void vp9_lpf_vertical_4_neon(uint8_t *s,
-; int p /* pitch */,
-; const uint8_t *blimit,
-; const uint8_t *limit,
-; const uint8_t *thresh,
-; int count)
-;
-; r0 uint8_t *s,
-; r1 int p, /* pitch */
-; r2 const uint8_t *blimit,
-; r3 const uint8_t *limit,
-; sp const uint8_t *thresh,
-; sp+4 int count
-|vp9_lpf_vertical_4_neon| PROC
- push {lr}
-
- vld1.8 {d0[]}, [r2] ; duplicate *blimit
- ldr r12, [sp, #8] ; load count
- vld1.8 {d1[]}, [r3] ; duplicate *limit
-
- ldr r3, [sp, #4] ; load thresh
- sub r2, r0, #4 ; move s pointer down by 4 columns
- cmp r12, #0
- beq end_vp9_lf_v_edge
-
- vld1.8 {d2[]}, [r3] ; duplicate *thresh
-
-count_lf_v_loop
- vld1.u8 {d3}, [r2], r1 ; load s data
- vld1.u8 {d4}, [r2], r1
- vld1.u8 {d5}, [r2], r1
- vld1.u8 {d6}, [r2], r1
- vld1.u8 {d7}, [r2], r1
- vld1.u8 {d16}, [r2], r1
- vld1.u8 {d17}, [r2], r1
- vld1.u8 {d18}, [r2]
-
- ;transpose to 8x16 matrix
- vtrn.32 d3, d7
- vtrn.32 d4, d16
- vtrn.32 d5, d17
- vtrn.32 d6, d18
-
- vtrn.16 d3, d5
- vtrn.16 d4, d6
- vtrn.16 d7, d17
- vtrn.16 d16, d18
-
- vtrn.8 d3, d4
- vtrn.8 d5, d6
- vtrn.8 d7, d16
- vtrn.8 d17, d18
-
- bl vp9_loop_filter_neon
-
- sub r0, r0, #2
-
- ;store op1, op0, oq0, oq1
- vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
- vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
- vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
- vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
- vst4.8 {d4[4], d5[4], d6[4], d7[4]}, [r0], r1
- vst4.8 {d4[5], d5[5], d6[5], d7[5]}, [r0], r1
- vst4.8 {d4[6], d5[6], d6[6], d7[6]}, [r0], r1
- vst4.8 {d4[7], d5[7], d6[7], d7[7]}, [r0]
-
- add r0, r0, r1, lsl #3 ; s += pitch * 8
- subs r12, r12, #1
- subne r2, r0, #4 ; move s pointer down by 4 columns
- bne count_lf_v_loop
-
-end_vp9_lf_v_edge
- pop {pc}
- ENDP ; |vp9_lpf_vertical_4_neon|
-
-; void vp9_loop_filter_neon();
-; This is a helper function for the loopfilters. The invidual functions do the
-; necessary load, transpose (if necessary) and store. The function does not use
-; registers d8-d15.
-;
-; Inputs:
-; r0-r3, r12 PRESERVE
-; d0 blimit
-; d1 limit
-; d2 thresh
-; d3 p3
-; d4 p2
-; d5 p1
-; d6 p0
-; d7 q0
-; d16 q1
-; d17 q2
-; d18 q3
-;
-; Outputs:
-; d4 op1
-; d5 op0
-; d6 oq0
-; d7 oq1
-|vp9_loop_filter_neon| PROC
- ; filter_mask
- vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2)
- vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1)
- vabd.u8 d21, d5, d6 ; m3 = abs(p1 - p0)
- vabd.u8 d22, d16, d7 ; m4 = abs(q1 - q0)
- vabd.u8 d3, d17, d16 ; m5 = abs(q2 - q1)
- vabd.u8 d4, d18, d17 ; m6 = abs(q3 - q2)
-
- ; only compare the largest value to limit
- vmax.u8 d19, d19, d20 ; m1 = max(m1, m2)
- vmax.u8 d20, d21, d22 ; m2 = max(m3, m4)
-
- vabd.u8 d17, d6, d7 ; abs(p0 - q0)
-
- vmax.u8 d3, d3, d4 ; m3 = max(m5, m6)
-
- vmov.u8 d18, #0x80
-
- vmax.u8 d23, d19, d20 ; m1 = max(m1, m2)
-
- ; hevmask
- vcgt.u8 d21, d21, d2 ; (abs(p1 - p0) > thresh)*-1
- vcgt.u8 d22, d22, d2 ; (abs(q1 - q0) > thresh)*-1
- vmax.u8 d23, d23, d3 ; m1 = max(m1, m3)
-
- vabd.u8 d28, d5, d16 ; a = abs(p1 - q1)
- vqadd.u8 d17, d17, d17 ; b = abs(p0 - q0) * 2
-
- veor d7, d7, d18 ; qs0
-
- vcge.u8 d23, d1, d23 ; abs(m1) > limit
-
- ; filter() function
- ; convert to signed
-
- vshr.u8 d28, d28, #1 ; a = a / 2
- veor d6, d6, d18 ; ps0
-
- veor d5, d5, d18 ; ps1
- vqadd.u8 d17, d17, d28 ; a = b + a
-
- veor d16, d16, d18 ; qs1
-
- vmov.u8 d19, #3
-
- vsub.s8 d28, d7, d6 ; ( qs0 - ps0)
-
- vcge.u8 d17, d0, d17 ; a > blimit
-
- vqsub.s8 d27, d5, d16 ; filter = clamp(ps1-qs1)
- vorr d22, d21, d22 ; hevmask
-
- vmull.s8 q12, d28, d19 ; 3 * ( qs0 - ps0)
-
- vand d27, d27, d22 ; filter &= hev
- vand d23, d23, d17 ; filter_mask
-
- vaddw.s8 q12, q12, d27 ; filter + 3 * (qs0 - ps0)
-
- vmov.u8 d17, #4
-
- ; filter = clamp(filter + 3 * ( qs0 - ps0))
- vqmovn.s16 d27, q12
-
- vand d27, d27, d23 ; filter &= mask
-
- vqadd.s8 d28, d27, d19 ; filter2 = clamp(filter+3)
- vqadd.s8 d27, d27, d17 ; filter1 = clamp(filter+4)
- vshr.s8 d28, d28, #3 ; filter2 >>= 3
- vshr.s8 d27, d27, #3 ; filter1 >>= 3
-
- vqadd.s8 d19, d6, d28 ; u = clamp(ps0 + filter2)
- vqsub.s8 d26, d7, d27 ; u = clamp(qs0 - filter1)
-
- ; outer tap adjustments
- vrshr.s8 d27, d27, #1 ; filter = ++filter1 >> 1
-
- veor d6, d26, d18 ; *oq0 = u^0x80
-
- vbic d27, d27, d22 ; filter &= ~hev
-
- vqadd.s8 d21, d5, d27 ; u = clamp(ps1 + filter)
- vqsub.s8 d20, d16, d27 ; u = clamp(qs1 - filter)
-
- veor d5, d19, d18 ; *op0 = u^0x80
- veor d4, d21, d18 ; *op1 = u^0x80
- veor d7, d20, d18 ; *oq1 = u^0x80
-
- bx lr
- ENDP ; |vp9_loop_filter_neon|
-
; void vp9_lpf_horizontal_8_neon(uint8_t *s, int p,
; const uint8_t *blimit,
; const uint8_t *limit,
#include <arm_neon.h>
+#include "./vp9_rtcd.h"
#include "./vpx_config.h"
-
-static INLINE void vp9_loop_filter_neon(
- uint8x8_t dblimit, // flimit
- uint8x8_t dlimit, // limit
- uint8x8_t dthresh, // thresh
- uint8x8_t d3u8, // p3
- uint8x8_t d4u8, // p2
- uint8x8_t d5u8, // p1
- uint8x8_t d6u8, // p0
- uint8x8_t d7u8, // q0
- uint8x8_t d16u8, // q1
- uint8x8_t d17u8, // q2
- uint8x8_t d18u8, // q3
- uint8x8_t *d4ru8, // p1
- uint8x8_t *d5ru8, // p0
- uint8x8_t *d6ru8, // q0
- uint8x8_t *d7ru8) { // q1
- uint8x8_t d19u8, d20u8, d21u8, d22u8, d23u8, d27u8, d28u8;
- int16x8_t q12s16;
- int8x8_t d19s8, d20s8, d21s8, d26s8, d27s8, d28s8;
-
- d19u8 = vabd_u8(d3u8, d4u8);
- d20u8 = vabd_u8(d4u8, d5u8);
- d21u8 = vabd_u8(d5u8, d6u8);
- d22u8 = vabd_u8(d16u8, d7u8);
- d3u8 = vabd_u8(d17u8, d16u8);
- d4u8 = vabd_u8(d18u8, d17u8);
-
- d19u8 = vmax_u8(d19u8, d20u8);
- d20u8 = vmax_u8(d21u8, d22u8);
- d3u8 = vmax_u8(d3u8, d4u8);
- d23u8 = vmax_u8(d19u8, d20u8);
-
- d17u8 = vabd_u8(d6u8, d7u8);
-
- d21u8 = vcgt_u8(d21u8, dthresh);
- d22u8 = vcgt_u8(d22u8, dthresh);
- d23u8 = vmax_u8(d23u8, d3u8);
-
- d28u8 = vabd_u8(d5u8, d16u8);
- d17u8 = vqadd_u8(d17u8, d17u8);
-
- d23u8 = vcge_u8(dlimit, d23u8);
-
- d18u8 = vdup_n_u8(0x80);
- d5u8 = veor_u8(d5u8, d18u8);
- d6u8 = veor_u8(d6u8, d18u8);
- d7u8 = veor_u8(d7u8, d18u8);
- d16u8 = veor_u8(d16u8, d18u8);
-
- d28u8 = vshr_n_u8(d28u8, 1);
- d17u8 = vqadd_u8(d17u8, d28u8);
-
- d19u8 = vdup_n_u8(3);
-
- d28s8 = vsub_s8(vreinterpret_s8_u8(d7u8),
- vreinterpret_s8_u8(d6u8));
-
- d17u8 = vcge_u8(dblimit, d17u8);
-
- d27s8 = vqsub_s8(vreinterpret_s8_u8(d5u8),
- vreinterpret_s8_u8(d16u8));
-
- d22u8 = vorr_u8(d21u8, d22u8);
-
- q12s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d19u8));
-
- d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d22u8);
- d23u8 = vand_u8(d23u8, d17u8);
-
- q12s16 = vaddw_s8(q12s16, vreinterpret_s8_u8(d27u8));
-
- d17u8 = vdup_n_u8(4);
-
- d27s8 = vqmovn_s16(q12s16);
- d27u8 = vand_u8(vreinterpret_u8_s8(d27s8), d23u8);
- d27s8 = vreinterpret_s8_u8(d27u8);
-
- d28s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d19u8));
- d27s8 = vqadd_s8(d27s8, vreinterpret_s8_u8(d17u8));
- d28s8 = vshr_n_s8(d28s8, 3);
- d27s8 = vshr_n_s8(d27s8, 3);
-
- d19s8 = vqadd_s8(vreinterpret_s8_u8(d6u8), d28s8);
- d26s8 = vqsub_s8(vreinterpret_s8_u8(d7u8), d27s8);
-
- d27s8 = vrshr_n_s8(d27s8, 1);
- d27s8 = vbic_s8(d27s8, vreinterpret_s8_u8(d22u8));
-
- d21s8 = vqadd_s8(vreinterpret_s8_u8(d5u8), d27s8);
- d20s8 = vqsub_s8(vreinterpret_s8_u8(d16u8), d27s8);
-
- *d4ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d18u8);
- *d5ru8 = veor_u8(vreinterpret_u8_s8(d19s8), d18u8);
- *d6ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d18u8);
- *d7ru8 = veor_u8(vreinterpret_u8_s8(d20s8), d18u8);
- return;
-}
-
-void vp9_lpf_horizontal_4_neon(
- unsigned char *src,
- int pitch,
- unsigned char *blimit,
- unsigned char *limit,
- unsigned char *thresh,
- int count) {
- int i;
- uint8_t *s, *psrc;
- uint8x8_t dblimit, dlimit, dthresh;
- uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
-
- if (count == 0) // end_vp9_lf_h_edge
- return;
-
- dblimit = vld1_u8(blimit);
- dlimit = vld1_u8(limit);
- dthresh = vld1_u8(thresh);
-
- psrc = src - (pitch << 2);
- for (i = 0; i < count; i++) {
- s = psrc + i * 8;
-
- d3u8 = vld1_u8(s);
- s += pitch;
- d4u8 = vld1_u8(s);
- s += pitch;
- d5u8 = vld1_u8(s);
- s += pitch;
- d6u8 = vld1_u8(s);
- s += pitch;
- d7u8 = vld1_u8(s);
- s += pitch;
- d16u8 = vld1_u8(s);
- s += pitch;
- d17u8 = vld1_u8(s);
- s += pitch;
- d18u8 = vld1_u8(s);
-
- vp9_loop_filter_neon(dblimit, dlimit, dthresh,
- d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
- &d4u8, &d5u8, &d6u8, &d7u8);
-
- s -= (pitch * 5);
- vst1_u8(s, d4u8);
- s += pitch;
- vst1_u8(s, d5u8);
- s += pitch;
- vst1_u8(s, d6u8);
- s += pitch;
- vst1_u8(s, d7u8);
- }
- return;
-}
-
-void vp9_lpf_vertical_4_neon(
- unsigned char *src,
- int pitch,
- unsigned char *blimit,
- unsigned char *limit,
- unsigned char *thresh,
- int count) {
- int i, pitch8;
- uint8_t *s;
- uint8x8_t dblimit, dlimit, dthresh;
- uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8;
- uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
- uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
- uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
- uint8x8x4_t d4Result;
-
- if (count == 0) // end_vp9_lf_h_edge
- return;
-
- dblimit = vld1_u8(blimit);
- dlimit = vld1_u8(limit);
- dthresh = vld1_u8(thresh);
-
- pitch8 = pitch * 8;
- for (i = 0; i < count; i++, src += pitch8) {
- s = src - (i + 1) * 4;
-
- d3u8 = vld1_u8(s);
- s += pitch;
- d4u8 = vld1_u8(s);
- s += pitch;
- d5u8 = vld1_u8(s);
- s += pitch;
- d6u8 = vld1_u8(s);
- s += pitch;
- d7u8 = vld1_u8(s);
- s += pitch;
- d16u8 = vld1_u8(s);
- s += pitch;
- d17u8 = vld1_u8(s);
- s += pitch;
- d18u8 = vld1_u8(s);
-
- d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
- vreinterpret_u32_u8(d7u8));
- d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
- vreinterpret_u32_u8(d16u8));
- d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
- vreinterpret_u32_u8(d17u8));
- d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
- vreinterpret_u32_u8(d18u8));
-
- d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
- vreinterpret_u16_u32(d2tmp2.val[0]));
- d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
- vreinterpret_u16_u32(d2tmp3.val[0]));
- d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
- vreinterpret_u16_u32(d2tmp2.val[1]));
- d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
- vreinterpret_u16_u32(d2tmp3.val[1]));
-
- d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
- vreinterpret_u8_u16(d2tmp5.val[0]));
- d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
- vreinterpret_u8_u16(d2tmp5.val[1]));
- d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
- vreinterpret_u8_u16(d2tmp7.val[0]));
- d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
- vreinterpret_u8_u16(d2tmp7.val[1]));
-
- d3u8 = d2tmp8.val[0];
- d4u8 = d2tmp8.val[1];
- d5u8 = d2tmp9.val[0];
- d6u8 = d2tmp9.val[1];
- d7u8 = d2tmp10.val[0];
- d16u8 = d2tmp10.val[1];
- d17u8 = d2tmp11.val[0];
- d18u8 = d2tmp11.val[1];
-
- vp9_loop_filter_neon(dblimit, dlimit, dthresh,
- d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
- &d4u8, &d5u8, &d6u8, &d7u8);
-
- d4Result.val[0] = d4u8;
- d4Result.val[1] = d5u8;
- d4Result.val[2] = d6u8;
- d4Result.val[3] = d7u8;
-
- src -= 2;
- vst4_lane_u8(src, d4Result, 0);
- src += pitch;
- vst4_lane_u8(src, d4Result, 1);
- src += pitch;
- vst4_lane_u8(src, d4Result, 2);
- src += pitch;
- vst4_lane_u8(src, d4Result, 3);
- src += pitch;
- vst4_lane_u8(src, d4Result, 4);
- src += pitch;
- vst4_lane_u8(src, d4Result, 5);
- src += pitch;
- vst4_lane_u8(src, d4Result, 6);
- src += pitch;
- vst4_lane_u8(src, d4Result, 7);
- }
- return;
+#include "vpx/vpx_integer.h"
+
+void vp9_lpf_vertical_4_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
}
-static INLINE void vp9_mbloop_filter_neon(
- uint8x8_t dblimit, // mblimit
- uint8x8_t dlimit, // limit
- uint8x8_t dthresh, // thresh
- uint8x8_t d3u8, // p2
- uint8x8_t d4u8, // p2
- uint8x8_t d5u8, // p1
- uint8x8_t d6u8, // p0
- uint8x8_t d7u8, // q0
- uint8x8_t d16u8, // q1
- uint8x8_t d17u8, // q2
- uint8x8_t d18u8, // q3
- uint8x8_t *d0ru8, // p1
- uint8x8_t *d1ru8, // p1
- uint8x8_t *d2ru8, // p0
- uint8x8_t *d3ru8, // q0
- uint8x8_t *d4ru8, // q1
- uint8x8_t *d5ru8) { // q1
- uint32_t flat;
- uint8x8_t d0u8, d1u8, d2u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8;
- uint8x8_t d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
- int16x8_t q15s16;
- uint16x8_t q10u16, q14u16;
- int8x8_t d21s8, d24s8, d25s8, d26s8, d28s8, d29s8, d30s8;
-
- d19u8 = vabd_u8(d3u8, d4u8);
- d20u8 = vabd_u8(d4u8, d5u8);
- d21u8 = vabd_u8(d5u8, d6u8);
- d22u8 = vabd_u8(d16u8, d7u8);
- d23u8 = vabd_u8(d17u8, d16u8);
- d24u8 = vabd_u8(d18u8, d17u8);
-
- d19u8 = vmax_u8(d19u8, d20u8);
- d20u8 = vmax_u8(d21u8, d22u8);
-
- d25u8 = vabd_u8(d6u8, d4u8);
-
- d23u8 = vmax_u8(d23u8, d24u8);
-
- d26u8 = vabd_u8(d7u8, d17u8);
-
- d19u8 = vmax_u8(d19u8, d20u8);
-
- d24u8 = vabd_u8(d6u8, d7u8);
- d27u8 = vabd_u8(d3u8, d6u8);
- d28u8 = vabd_u8(d18u8, d7u8);
-
- d19u8 = vmax_u8(d19u8, d23u8);
-
- d23u8 = vabd_u8(d5u8, d16u8);
- d24u8 = vqadd_u8(d24u8, d24u8);
-
-
- d19u8 = vcge_u8(dlimit, d19u8);
-
-
- d25u8 = vmax_u8(d25u8, d26u8);
- d26u8 = vmax_u8(d27u8, d28u8);
-
- d23u8 = vshr_n_u8(d23u8, 1);
-
- d25u8 = vmax_u8(d25u8, d26u8);
-
- d24u8 = vqadd_u8(d24u8, d23u8);
-
- d20u8 = vmax_u8(d20u8, d25u8);
-
- d23u8 = vdup_n_u8(1);
- d24u8 = vcge_u8(dblimit, d24u8);
-
- d21u8 = vcgt_u8(d21u8, dthresh);
-
- d20u8 = vcge_u8(d23u8, d20u8);
-
- d19u8 = vand_u8(d19u8, d24u8);
-
- d23u8 = vcgt_u8(d22u8, dthresh);
-
- d20u8 = vand_u8(d20u8, d19u8);
-
- d22u8 = vdup_n_u8(0x80);
-
- d23u8 = vorr_u8(d21u8, d23u8);
-
- q10u16 = vcombine_u16(vreinterpret_u16_u8(d20u8),
- vreinterpret_u16_u8(d21u8));
-
- d30u8 = vshrn_n_u16(q10u16, 4);
- flat = vget_lane_u32(vreinterpret_u32_u8(d30u8), 0);
-
- if (flat == 0xffffffff) { // Check for all 1's, power_branch_only
- d27u8 = vdup_n_u8(3);
- d21u8 = vdup_n_u8(2);
- q14u16 = vaddl_u8(d6u8, d7u8);
- q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
- q14u16 = vmlal_u8(q14u16, d4u8, d21u8);
- q14u16 = vaddw_u8(q14u16, d5u8);
- *d0ru8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d4u8);
- q14u16 = vaddw_u8(q14u16, d5u8);
- q14u16 = vaddw_u8(q14u16, d16u8);
- *d1ru8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d5u8);
- q14u16 = vaddw_u8(q14u16, d6u8);
- q14u16 = vaddw_u8(q14u16, d17u8);
- *d2ru8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d6u8);
- q14u16 = vaddw_u8(q14u16, d7u8);
- q14u16 = vaddw_u8(q14u16, d18u8);
- *d3ru8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d4u8);
- q14u16 = vsubw_u8(q14u16, d7u8);
- q14u16 = vaddw_u8(q14u16, d16u8);
- q14u16 = vaddw_u8(q14u16, d18u8);
- *d4ru8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d5u8);
- q14u16 = vsubw_u8(q14u16, d16u8);
- q14u16 = vaddw_u8(q14u16, d17u8);
- q14u16 = vaddw_u8(q14u16, d18u8);
- *d5ru8 = vqrshrn_n_u16(q14u16, 3);
- } else {
- d21u8 = veor_u8(d7u8, d22u8);
- d24u8 = veor_u8(d6u8, d22u8);
- d25u8 = veor_u8(d5u8, d22u8);
- d26u8 = veor_u8(d16u8, d22u8);
-
- d27u8 = vdup_n_u8(3);
-
- d28s8 = vsub_s8(vreinterpret_s8_u8(d21u8), vreinterpret_s8_u8(d24u8));
- d29s8 = vqsub_s8(vreinterpret_s8_u8(d25u8), vreinterpret_s8_u8(d26u8));
-
- q15s16 = vmull_s8(d28s8, vreinterpret_s8_u8(d27u8));
-
- d29s8 = vand_s8(d29s8, vreinterpret_s8_u8(d23u8));
-
- q15s16 = vaddw_s8(q15s16, d29s8);
-
- d29u8 = vdup_n_u8(4);
-
- d28s8 = vqmovn_s16(q15s16);
-
- d28s8 = vand_s8(d28s8, vreinterpret_s8_u8(d19u8));
-
- d30s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d27u8));
- d29s8 = vqadd_s8(d28s8, vreinterpret_s8_u8(d29u8));
- d30s8 = vshr_n_s8(d30s8, 3);
- d29s8 = vshr_n_s8(d29s8, 3);
-
- d24s8 = vqadd_s8(vreinterpret_s8_u8(d24u8), d30s8);
- d21s8 = vqsub_s8(vreinterpret_s8_u8(d21u8), d29s8);
-
- d29s8 = vrshr_n_s8(d29s8, 1);
- d29s8 = vbic_s8(d29s8, vreinterpret_s8_u8(d23u8));
-
- d25s8 = vqadd_s8(vreinterpret_s8_u8(d25u8), d29s8);
- d26s8 = vqsub_s8(vreinterpret_s8_u8(d26u8), d29s8);
-
- if (flat == 0) { // filter_branch_only
- *d0ru8 = d4u8;
- *d1ru8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
- *d2ru8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
- *d3ru8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
- *d4ru8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
- *d5ru8 = d17u8;
- return;
- }
-
- d21u8 = veor_u8(vreinterpret_u8_s8(d21s8), d22u8);
- d24u8 = veor_u8(vreinterpret_u8_s8(d24s8), d22u8);
- d25u8 = veor_u8(vreinterpret_u8_s8(d25s8), d22u8);
- d26u8 = veor_u8(vreinterpret_u8_s8(d26s8), d22u8);
-
- d23u8 = vdup_n_u8(2);
- q14u16 = vaddl_u8(d6u8, d7u8);
- q14u16 = vmlal_u8(q14u16, d3u8, d27u8);
- q14u16 = vmlal_u8(q14u16, d4u8, d23u8);
-
- d0u8 = vbsl_u8(d20u8, dblimit, d4u8);
-
- q14u16 = vaddw_u8(q14u16, d5u8);
-
- d1u8 = vbsl_u8(d20u8, dlimit, d25u8);
-
- d30u8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d4u8);
- q14u16 = vaddw_u8(q14u16, d5u8);
- q14u16 = vaddw_u8(q14u16, d16u8);
-
- d2u8 = vbsl_u8(d20u8, dthresh, d24u8);
-
- d31u8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d5u8);
- q14u16 = vaddw_u8(q14u16, d6u8);
- q14u16 = vaddw_u8(q14u16, d17u8);
-
- *d0ru8 = vbsl_u8(d20u8, d30u8, d0u8);
-
- d23u8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d3u8);
- q14u16 = vsubw_u8(q14u16, d6u8);
- q14u16 = vaddw_u8(q14u16, d7u8);
-
- *d1ru8 = vbsl_u8(d20u8, d31u8, d1u8);
-
- q14u16 = vaddw_u8(q14u16, d18u8);
-
- *d2ru8 = vbsl_u8(d20u8, d23u8, d2u8);
-
- d22u8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d4u8);
- q14u16 = vsubw_u8(q14u16, d7u8);
- q14u16 = vaddw_u8(q14u16, d16u8);
-
- d3u8 = vbsl_u8(d20u8, d3u8, d21u8);
-
- q14u16 = vaddw_u8(q14u16, d18u8);
-
- d4u8 = vbsl_u8(d20u8, d4u8, d26u8);
-
- d6u8 = vqrshrn_n_u16(q14u16, 3);
-
- q14u16 = vsubw_u8(q14u16, d5u8);
- q14u16 = vsubw_u8(q14u16, d16u8);
- q14u16 = vaddw_u8(q14u16, d17u8);
- q14u16 = vaddw_u8(q14u16, d18u8);
-
- d5u8 = vbsl_u8(d20u8, d5u8, d17u8);
-
- d7u8 = vqrshrn_n_u16(q14u16, 3);
-
- *d3ru8 = vbsl_u8(d20u8, d22u8, d3u8);
- *d4ru8 = vbsl_u8(d20u8, d6u8, d4u8);
- *d5ru8 = vbsl_u8(d20u8, d7u8, d5u8);
- }
- return;
+#if HAVE_NEON_ASM
+void vp9_lpf_horizontal_8_dual_neon(uint8_t *s, int p /* pitch */,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1);
}
-void vp9_lpf_horizontal_8_neon(
- unsigned char *src,
- int pitch,
- unsigned char *blimit,
- unsigned char *limit,
- unsigned char *thresh,
- int count) {
- int i;
- uint8_t *s, *psrc;
- uint8x8_t dblimit, dlimit, dthresh;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
- uint8x8_t d16u8, d17u8, d18u8;
-
- if (count == 0) // end_vp9_mblf_h_edge
- return;
-
- dblimit = vld1_u8(blimit);
- dlimit = vld1_u8(limit);
- dthresh = vld1_u8(thresh);
-
- psrc = src - (pitch << 2);
- for (i = 0; i < count; i++) {
- s = psrc + i * 8;
-
- d3u8 = vld1_u8(s);
- s += pitch;
- d4u8 = vld1_u8(s);
- s += pitch;
- d5u8 = vld1_u8(s);
- s += pitch;
- d6u8 = vld1_u8(s);
- s += pitch;
- d7u8 = vld1_u8(s);
- s += pitch;
- d16u8 = vld1_u8(s);
- s += pitch;
- d17u8 = vld1_u8(s);
- s += pitch;
- d18u8 = vld1_u8(s);
-
- vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
- d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
- &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
-
- s -= (pitch * 6);
- vst1_u8(s, d0u8);
- s += pitch;
- vst1_u8(s, d1u8);
- s += pitch;
- vst1_u8(s, d2u8);
- s += pitch;
- vst1_u8(s, d3u8);
- s += pitch;
- vst1_u8(s, d4u8);
- s += pitch;
- vst1_u8(s, d5u8);
- }
- return;
+void vp9_lpf_vertical_8_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit0,
+ const uint8_t *limit0,
+ const uint8_t *thresh0,
+ const uint8_t *blimit1,
+ const uint8_t *limit1,
+ const uint8_t *thresh1) {
+ vp9_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1);
+ vp9_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1);
}
-void vp9_lpf_vertical_8_neon(
- unsigned char *src,
- int pitch,
- unsigned char *blimit,
- unsigned char *limit,
- unsigned char *thresh,
- int count) {
- int i;
- uint8_t *s;
- uint8x8_t dblimit, dlimit, dthresh;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
- uint8x8_t d16u8, d17u8, d18u8;
- uint32x2x2_t d2tmp0, d2tmp1, d2tmp2, d2tmp3;
- uint16x4x2_t d2tmp4, d2tmp5, d2tmp6, d2tmp7;
- uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11;
- uint8x8x4_t d4Result;
- uint8x8x2_t d2Result;
-
- if (count == 0)
- return;
-
- dblimit = vld1_u8(blimit);
- dlimit = vld1_u8(limit);
- dthresh = vld1_u8(thresh);
-
- for (i = 0; i < count; i++) {
- s = src + (i * (pitch << 3)) - 4;
-
- d3u8 = vld1_u8(s);
- s += pitch;
- d4u8 = vld1_u8(s);
- s += pitch;
- d5u8 = vld1_u8(s);
- s += pitch;
- d6u8 = vld1_u8(s);
- s += pitch;
- d7u8 = vld1_u8(s);
- s += pitch;
- d16u8 = vld1_u8(s);
- s += pitch;
- d17u8 = vld1_u8(s);
- s += pitch;
- d18u8 = vld1_u8(s);
-
- d2tmp0 = vtrn_u32(vreinterpret_u32_u8(d3u8),
- vreinterpret_u32_u8(d7u8));
- d2tmp1 = vtrn_u32(vreinterpret_u32_u8(d4u8),
- vreinterpret_u32_u8(d16u8));
- d2tmp2 = vtrn_u32(vreinterpret_u32_u8(d5u8),
- vreinterpret_u32_u8(d17u8));
- d2tmp3 = vtrn_u32(vreinterpret_u32_u8(d6u8),
- vreinterpret_u32_u8(d18u8));
-
- d2tmp4 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[0]),
- vreinterpret_u16_u32(d2tmp2.val[0]));
- d2tmp5 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[0]),
- vreinterpret_u16_u32(d2tmp3.val[0]));
- d2tmp6 = vtrn_u16(vreinterpret_u16_u32(d2tmp0.val[1]),
- vreinterpret_u16_u32(d2tmp2.val[1]));
- d2tmp7 = vtrn_u16(vreinterpret_u16_u32(d2tmp1.val[1]),
- vreinterpret_u16_u32(d2tmp3.val[1]));
-
- d2tmp8 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[0]),
- vreinterpret_u8_u16(d2tmp5.val[0]));
- d2tmp9 = vtrn_u8(vreinterpret_u8_u16(d2tmp4.val[1]),
- vreinterpret_u8_u16(d2tmp5.val[1]));
- d2tmp10 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[0]),
- vreinterpret_u8_u16(d2tmp7.val[0]));
- d2tmp11 = vtrn_u8(vreinterpret_u8_u16(d2tmp6.val[1]),
- vreinterpret_u8_u16(d2tmp7.val[1]));
-
- d3u8 = d2tmp8.val[0];
- d4u8 = d2tmp8.val[1];
- d5u8 = d2tmp9.val[0];
- d6u8 = d2tmp9.val[1];
- d7u8 = d2tmp10.val[0];
- d16u8 = d2tmp10.val[1];
- d17u8 = d2tmp11.val[0];
- d18u8 = d2tmp11.val[1];
-
- vp9_mbloop_filter_neon(dblimit, dlimit, dthresh,
- d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8,
- &d0u8, &d1u8, &d2u8, &d3u8, &d4u8, &d5u8);
-
- d4Result.val[0] = d0u8;
- d4Result.val[1] = d1u8;
- d4Result.val[2] = d2u8;
- d4Result.val[3] = d3u8;
-
- d2Result.val[0] = d4u8;
- d2Result.val[1] = d5u8;
-
- s = src - 3;
- vst4_lane_u8(s, d4Result, 0);
- s += pitch;
- vst4_lane_u8(s, d4Result, 1);
- s += pitch;
- vst4_lane_u8(s, d4Result, 2);
- s += pitch;
- vst4_lane_u8(s, d4Result, 3);
- s += pitch;
- vst4_lane_u8(s, d4Result, 4);
- s += pitch;
- vst4_lane_u8(s, d4Result, 5);
- s += pitch;
- vst4_lane_u8(s, d4Result, 6);
- s += pitch;
- vst4_lane_u8(s, d4Result, 7);
-
- s = src + 1;
- vst2_lane_u8(s, d2Result, 0);
- s += pitch;
- vst2_lane_u8(s, d2Result, 1);
- s += pitch;
- vst2_lane_u8(s, d2Result, 2);
- s += pitch;
- vst2_lane_u8(s, d2Result, 3);
- s += pitch;
- vst2_lane_u8(s, d2Result, 4);
- s += pitch;
- vst2_lane_u8(s, d2Result, 5);
- s += pitch;
- vst2_lane_u8(s, d2Result, 6);
- s += pitch;
- vst2_lane_u8(s, d2Result, 7);
- }
- return;
+void vp9_lpf_vertical_16_dual_neon(uint8_t *s, int p,
+ const uint8_t *blimit,
+ const uint8_t *limit,
+ const uint8_t *thresh) {
+ vp9_lpf_vertical_16_neon(s, p, blimit, limit, thresh);
+ vp9_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh);
}
+#endif // HAVE_NEON_ASM
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_systemdependent.h"
+// TODO(hkuang): Locking the whole pool will no longer be necessary once
+// atomic frame reference counting is implemented.
+void lock_buffer_pool(BufferPool *const pool) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&pool->pool_mutex);
+#else
+ (void)pool;
+#endif
+}
+
+void unlock_buffer_pool(BufferPool *const pool) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&pool->pool_mutex);
+#else
+ (void)pool;
+#endif
+}
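// Illustrative sketch (not part of the patch): how a caller might bracket a
// frame-buffer reference-count update with the pool lock above. The helper
// name ref_cnt_fb_locked and its idx parameter are hypothetical; only
// lock_buffer_pool(), unlock_buffer_pool() and frame_bufs[].ref_count come
// from the surrounding code.
static void ref_cnt_fb_locked(VP9_COMMON *cm, int idx) {
  BufferPool *const pool = cm->buffer_pool;
  lock_buffer_pool(pool);
  ++pool->frame_bufs[idx].ref_count;  // Safe against concurrent updates.
  unlock_buffer_pool(pool);
}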
+
void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
cm->MBs = cm->mb_rows * cm->mb_cols;
}
+static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+ if (cm->seg_map_array[i] == NULL)
+ return 1;
+ }
+
+  // Initialize the ping-pong indices.
+ cm->seg_map_idx = 0;
+ cm->prev_seg_map_idx = 1;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+ if (!cm->frame_parallel_decode)
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+
+ return 0;
+}
+
+static void free_seg_map(VP9_COMMON *cm) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ vpx_free(cm->seg_map_array[i]);
+ cm->seg_map_array[i] = NULL;
+ }
+
+ cm->current_frame_seg_map = NULL;
+
+ if (!cm->frame_parallel_decode) {
+ cm->last_frame_seg_map = NULL;
+ }
+}
+
void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
+ BufferPool *const pool = cm->buffer_pool;
int i;
for (i = 0; i < FRAME_BUFFERS; ++i) {
- if (cm->frame_bufs[i].ref_count > 0 &&
- cm->frame_bufs[i].raw_frame_buffer.data != NULL) {
- cm->release_fb_cb(cm->cb_priv, &cm->frame_bufs[i].raw_frame_buffer);
- cm->frame_bufs[i].ref_count = 0;
+ if (pool->frame_bufs[i].ref_count > 0 &&
+ pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
+ pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
+ pool->frame_bufs[i].ref_count = 0;
}
- vpx_free(cm->frame_bufs[i].mvs);
- cm->frame_bufs[i].mvs = NULL;
- vp9_free_frame_buffer(&cm->frame_bufs[i].buf);
+ vpx_free(pool->frame_bufs[i].mvs);
+ pool->frame_bufs[i].mvs = NULL;
+ vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
}
#if CONFIG_VP9_POSTPROC
void vp9_free_context_buffers(VP9_COMMON *cm) {
cm->free_mi(cm);
- vpx_free(cm->last_frame_seg_map);
- cm->last_frame_seg_map = NULL;
+ free_seg_map(cm);
vpx_free(cm->above_context);
cm->above_context = NULL;
vpx_free(cm->above_seg_context);
if (cm->alloc_mi(cm, cm->mi_stride * calc_mi_size(cm->mi_rows)))
goto fail;
- cm->last_frame_seg_map = (uint8_t *)vpx_calloc(cm->mi_rows * cm->mi_cols, 1);
- if (!cm->last_frame_seg_map) goto fail;
+  // Create the segmentation map structure and set it to 0.
+ free_seg_map(cm);
+ if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols))
+ goto fail;
cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc(
2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE,
}
static void init_frame_bufs(VP9_COMMON *cm) {
+ BufferPool *const pool = cm->buffer_pool;
int i;
cm->new_fb_idx = FRAME_BUFFERS - 1;
- cm->frame_bufs[cm->new_fb_idx].ref_count = 1;
+ pool->frame_bufs[cm->new_fb_idx].ref_count = 1;
for (i = 0; i < REF_FRAMES; ++i) {
cm->ref_frame_map[i] = i;
- cm->frame_bufs[i].ref_count = 1;
+ pool->frame_bufs[i].ref_count = 1;
}
}
vp9_free_ref_frame_buffers(cm);
for (i = 0; i < FRAME_BUFFERS; ++i) {
- cm->frame_bufs[i].ref_count = 0;
- if (vp9_alloc_frame_buffer(&cm->frame_bufs[i].buf, width, height,
+ BufferPool *const pool = cm->buffer_pool;
+ pool->frame_bufs[i].ref_count = 0;
+ if (vp9_alloc_frame_buffer(&pool->frame_bufs[i].buf, width, height,
ss_x, ss_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
VP9_ENC_BORDER_IN_PIXELS,
cm->byte_alignment) < 0)
goto fail;
- if (cm->frame_bufs[i].mvs == NULL) {
- cm->frame_bufs[i].mvs =
+ if (pool->frame_bufs[i].mvs == NULL) {
+ pool->frame_bufs[i].mvs =
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->frame_bufs[i].mvs));
- if (cm->frame_bufs[i].mvs == NULL)
+ sizeof(*pool->frame_bufs[i].mvs));
+ if (pool->frame_bufs[i].mvs == NULL)
goto fail;
- cm->frame_bufs[i].mi_rows = cm->mi_rows;
- cm->frame_bufs[i].mi_cols = cm->mi_cols;
+ pool->frame_bufs[i].mi_rows = cm->mi_rows;
+ pool->frame_bufs[i].mi_cols = cm->mi_cols;
}
}
void vp9_remove_common(VP9_COMMON *cm) {
vp9_free_ref_frame_buffers(cm);
vp9_free_context_buffers(cm);
- vp9_free_internal_frame_buffers(&cm->int_frame_buffers);
vpx_free(cm->fc);
cm->fc = NULL;
void vp9_init_context_buffers(VP9_COMMON *cm) {
cm->setup_mi(cm);
- if (cm->last_frame_seg_map)
+ if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
}
+
+void vp9_swap_current_and_last_seg_map(VP9_COMMON *cm) {
+ // Swap indices.
+ const int tmp = cm->seg_map_idx;
+ cm->seg_map_idx = cm->prev_seg_map_idx;
+ cm->prev_seg_map_idx = tmp;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+}
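// Illustrative sketch (not part of the patch): the intended per-frame use of
// the ping-pong segmentation maps. decode_frame_body() is a hypothetical
// placeholder for the actual frame decode; only the two map pointers and
// vp9_swap_current_and_last_seg_map() come from the code above.
static void decode_frame_body(VP9_COMMON *cm);  // hypothetical

static void decode_frame_with_seg_maps(VP9_COMMON *cm) {
  // current_frame_seg_map is written while this frame is decoded, while
  // last_frame_seg_map still points at the previous frame's map for
  // segment-id prediction, so the two never need to be copied.
  decode_frame_body(cm);
  // Make this frame's map become the "last" map for the next frame.
  vp9_swap_current_and_last_seg_map(cm);
}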
void vp9_set_mb_mi(struct VP9Common *cm, int width, int height);
+void vp9_swap_current_and_last_seg_map(struct VP9Common *cm);
+
#ifdef __cplusplus
} // extern "C"
#endif
int lossless;
int corrupted;
+
+ struct vpx_internal_error_info *error_info;
} MACROBLOCKD;
static INLINE BLOCK_SIZE get_subsize(BLOCK_SIZE bsize,
const vp9_prob *prob;
int len;
int base_val;
+ const int16_t *cost;
} vp9_extra_bit;
// indexed by token value
-EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP
};
-#define COUNT_SAT 20
-#define MAX_UPDATE_FACTOR 128
-
-static int adapt_prob(vp9_prob pre_prob, const unsigned int ct[2]) {
- return merge_probs(pre_prob, ct, COUNT_SAT, MAX_UPDATE_FACTOR);
-}
-
-static void adapt_probs(const vp9_tree_index *tree,
- const vp9_prob *pre_probs, const unsigned int *counts,
- vp9_prob *probs) {
- vp9_tree_merge_probs(tree, pre_probs, counts, COUNT_SAT, MAX_UPDATE_FACTOR,
- probs);
-}
-
void vp9_adapt_mode_probs(VP9_COMMON *cm) {
int i, j;
FRAME_CONTEXT *fc = cm->fc;
const FRAME_COUNTS *counts = &cm->counts;
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
- fc->intra_inter_prob[i] = adapt_prob(pre_fc->intra_inter_prob[i],
- counts->intra_inter[i]);
+ fc->intra_inter_prob[i] = mode_mv_merge_probs(pre_fc->intra_inter_prob[i],
+ counts->intra_inter[i]);
for (i = 0; i < COMP_INTER_CONTEXTS; i++)
- fc->comp_inter_prob[i] = adapt_prob(pre_fc->comp_inter_prob[i],
- counts->comp_inter[i]);
+ fc->comp_inter_prob[i] = mode_mv_merge_probs(pre_fc->comp_inter_prob[i],
+ counts->comp_inter[i]);
for (i = 0; i < REF_CONTEXTS; i++)
- fc->comp_ref_prob[i] = adapt_prob(pre_fc->comp_ref_prob[i],
- counts->comp_ref[i]);
+ fc->comp_ref_prob[i] = mode_mv_merge_probs(pre_fc->comp_ref_prob[i],
+ counts->comp_ref[i]);
for (i = 0; i < REF_CONTEXTS; i++)
for (j = 0; j < 2; j++)
- fc->single_ref_prob[i][j] = adapt_prob(pre_fc->single_ref_prob[i][j],
- counts->single_ref[i][j]);
+ fc->single_ref_prob[i][j] = mode_mv_merge_probs(
+ pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
for (i = 0; i < INTER_MODE_CONTEXTS; i++)
- adapt_probs(vp9_inter_mode_tree, pre_fc->inter_mode_probs[i],
+ vp9_tree_merge_probs(vp9_inter_mode_tree, pre_fc->inter_mode_probs[i],
counts->inter_mode[i], fc->inter_mode_probs[i]);
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
- adapt_probs(vp9_intra_mode_tree, pre_fc->y_mode_prob[i],
+ vp9_tree_merge_probs(vp9_intra_mode_tree, pre_fc->y_mode_prob[i],
counts->y_mode[i], fc->y_mode_prob[i]);
for (i = 0; i < INTRA_MODES; ++i)
- adapt_probs(vp9_intra_mode_tree, pre_fc->uv_mode_prob[i],
- counts->uv_mode[i], fc->uv_mode_prob[i]);
+ vp9_tree_merge_probs(vp9_intra_mode_tree, pre_fc->uv_mode_prob[i],
+ counts->uv_mode[i], fc->uv_mode_prob[i]);
for (i = 0; i < PARTITION_CONTEXTS; i++)
- adapt_probs(vp9_partition_tree, pre_fc->partition_prob[i],
- counts->partition[i], fc->partition_prob[i]);
+ vp9_tree_merge_probs(vp9_partition_tree, pre_fc->partition_prob[i],
+ counts->partition[i], fc->partition_prob[i]);
if (cm->interp_filter == SWITCHABLE) {
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
- adapt_probs(vp9_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
- counts->switchable_interp[i], fc->switchable_interp_prob[i]);
+ vp9_tree_merge_probs(vp9_switchable_interp_tree,
+ pre_fc->switchable_interp_prob[i],
+ counts->switchable_interp[i],
+ fc->switchable_interp_prob[i]);
}
if (cm->tx_mode == TX_MODE_SELECT) {
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
for (j = 0; j < TX_SIZES - 3; ++j)
- fc->tx_probs.p8x8[i][j] = adapt_prob(pre_fc->tx_probs.p8x8[i][j],
- branch_ct_8x8p[j]);
+ fc->tx_probs.p8x8[i][j] = mode_mv_merge_probs(
+ pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]);
tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], branch_ct_16x16p);
for (j = 0; j < TX_SIZES - 2; ++j)
- fc->tx_probs.p16x16[i][j] = adapt_prob(pre_fc->tx_probs.p16x16[i][j],
- branch_ct_16x16p[j]);
+ fc->tx_probs.p16x16[i][j] = mode_mv_merge_probs(
+ pre_fc->tx_probs.p16x16[i][j], branch_ct_16x16p[j]);
tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], branch_ct_32x32p);
for (j = 0; j < TX_SIZES - 1; ++j)
- fc->tx_probs.p32x32[i][j] = adapt_prob(pre_fc->tx_probs.p32x32[i][j],
- branch_ct_32x32p[j]);
+ fc->tx_probs.p32x32[i][j] = mode_mv_merge_probs(
+ pre_fc->tx_probs.p32x32[i][j], branch_ct_32x32p[j]);
}
}
for (i = 0; i < SKIP_CONTEXTS; ++i)
- fc->skip_probs[i] = adapt_prob(pre_fc->skip_probs[i], counts->skip[i]);
+ fc->skip_probs[i] = mode_mv_merge_probs(
+ pre_fc->skip_probs[i], counts->skip[i]);
}
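// Illustrative sketch (not part of the patch) of the blend that
// mode_mv_merge_probs() is assumed to perform, using the same saturation
// constants (20 and 128) as the adapt_prob()/COUNT_SAT helpers removed above.
// The adapted value is a weighted average of the previous-frame probability
// and the probability implied by this frame's counts; the weight on the new
// estimate grows with the number of symbols observed, up to a cap of 1/2.
static vp9_prob blend_prob_sketch(vp9_prob pre_prob,
                                  unsigned int ct0, unsigned int ct1) {
  const unsigned int count_sat = 20;    // cf. the removed COUNT_SAT
  const unsigned int max_factor = 128;  // cf. the removed MAX_UPDATE_FACTOR
  const unsigned int den = ct0 + ct1;
  unsigned int obs_prob = den ? (255 * ct0 + (den >> 1)) / den : 128;
  const unsigned int count = den < count_sat ? den : count_sat;
  const unsigned int factor = max_factor * count / count_sat;
  if (obs_prob < 1) obs_prob = 1;    // keep the estimate in [1, 255]
  if (obs_prob > 255) obs_prob = 255;
  // factor / 256 is the weight given to the new estimate.
  return (vp9_prob)((pre_prob * (256 - factor) + obs_prob * factor + 128) >> 8);
}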
static void set_default_lf_deltas(struct loopfilter *lf) {
int i;
vp9_clearall_segfeatures(&cm->seg);
cm->seg.abs_delta = SEGMENT_DELTADATA;
- if (cm->last_frame_seg_map)
+
+ if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+ if (cm->current_frame_seg_map)
+ vpx_memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
+
// Reset the mode ref deltas for loop filter
vp9_zero(lf->last_ref_deltas);
vp9_zero(lf->last_mode_deltas);
}
// prev_mip will only be allocated in encoder.
- if (frame_is_intra_only(cm) && cm->prev_mip)
+ if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode)
vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
sizeof(*cm->prev_mip));
unsigned int p32x32[TX_SIZE_CONTEXTS][TX_SIZES];
unsigned int p16x16[TX_SIZE_CONTEXTS][TX_SIZES - 1];
unsigned int p8x8[TX_SIZE_CONTEXTS][TX_SIZES - 2];
+ unsigned int tx_totals[TX_SIZES];
};
typedef struct frame_contexts {
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_entropymv.h"
-#define MV_COUNT_SAT 20
-#define MV_MAX_UPDATE_FACTOR 128
-
// Integer pel reference mv threshold for use of high-precision 1/8 mv
#define COMPANDED_MVREF_THRESH 8
}
}
-static vp9_prob adapt_prob(vp9_prob prep, const unsigned int ct[2]) {
- return merge_probs(prep, ct, MV_COUNT_SAT, MV_MAX_UPDATE_FACTOR);
-}
-
-static void adapt_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs,
- const unsigned int *counts, vp9_prob *probs) {
- vp9_tree_merge_probs(tree, pre_probs, counts, MV_COUNT_SAT,
- MV_MAX_UPDATE_FACTOR, probs);
-}
-
void vp9_adapt_mv_probs(VP9_COMMON *cm, int allow_hp) {
int i, j;
const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
const nmv_context_counts *counts = &cm->counts.mv;
- adapt_probs(vp9_mv_joint_tree, pre_fc->joints, counts->joints, fc->joints);
+ vp9_tree_merge_probs(vp9_mv_joint_tree, pre_fc->joints, counts->joints,
+ fc->joints);
for (i = 0; i < 2; ++i) {
nmv_component *comp = &fc->comps[i];
const nmv_component *pre_comp = &pre_fc->comps[i];
const nmv_component_counts *c = &counts->comps[i];
- comp->sign = adapt_prob(pre_comp->sign, c->sign);
- adapt_probs(vp9_mv_class_tree, pre_comp->classes, c->classes,
- comp->classes);
- adapt_probs(vp9_mv_class0_tree, pre_comp->class0, c->class0, comp->class0);
+ comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign);
+ vp9_tree_merge_probs(vp9_mv_class_tree, pre_comp->classes, c->classes,
+ comp->classes);
+ vp9_tree_merge_probs(vp9_mv_class0_tree, pre_comp->class0, c->class0,
+ comp->class0);
for (j = 0; j < MV_OFFSET_BITS; ++j)
- comp->bits[j] = adapt_prob(pre_comp->bits[j], c->bits[j]);
+ comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
for (j = 0; j < CLASS0_SIZE; ++j)
- adapt_probs(vp9_mv_fp_tree, pre_comp->class0_fp[j], c->class0_fp[j],
- comp->class0_fp[j]);
+ vp9_tree_merge_probs(vp9_mv_fp_tree, pre_comp->class0_fp[j],
+ c->class0_fp[j], comp->class0_fp[j]);
- adapt_probs(vp9_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+ vp9_tree_merge_probs(vp9_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
if (allow_hp) {
- comp->class0_hp = adapt_prob(pre_comp->class0_hp, c->class0_hp);
- comp->hp = adapt_prob(pre_comp->hp, c->hp);
+ comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
+ comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp);
}
}
}
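// Note (not part of the patch): vp9_tree_merge_probs() is assumed to apply
// the same per-probability blend at every internal node of the given tree,
// using that node's branch counts. For vp9_mv_joint_tree, for example, the
// four joint-symbol counts are folded into three binary branch counts and
// each of the three node probabilities is adapted independently.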
} TX_TYPE;
typedef enum {
- UNKNOWN = 0,
- BT_601 = 1, // YUV
- BT_709 = 2, // YUV
- SMPTE_170 = 3, // YUV
- SMPTE_240 = 4, // YUV
- BT_2020 = 5, // YUV
- RESERVED_2 = 6,
- SRGB = 7 // RGB
-} COLOR_SPACE;
-
-typedef enum {
VP9_LAST_FLAG = 1 << 0,
VP9_GOLD_FLAG = 1 << 1,
VP9_ALT_FLAG = 1 << 2,
// stage 1
temp1 = (input[0] + input[2]) * cospi_16_64;
temp2 = (input[0] - input[2]) * cospi_16_64;
- step[0] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step[1] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
- step[2] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step[3] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
// stage 2
output[0] = WRAPLOW(step[0] + step[3], bd);
int dest_stride, int bd) {
int i;
tran_high_t a1;
- tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out = WRAPLOW(
+ highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+ out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
a1 = ROUND_POWER_OF_TWO(out, 4);
for (i = 0; i < 4; i++) {
step1[3] = input[6];
temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
- step1[4] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
// stage 2 & stage 3 - even half
vp9_highbd_idct4(step1, step1, bd);
step1[4] = step2[4];
temp1 = (step2[6] - step2[5]) * cospi_16_64;
temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[7] = step2[7];
// stage 4
int stride, int bd) {
int i, j;
tran_high_t a1;
- tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out = WRAPLOW(
+ highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+ out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
a1 = ROUND_POWER_OF_TWO(out, 5);
for (j = 0; j < 8; ++j) {
for (i = 0; i < 8; ++i)
// The overall dynamic range is 14b (input) + 14b (multiplication scaling)
// + 1b (addition) = 29b.
// Hence the output bit depth is 15b.
- output[0] = WRAPLOW(dct_const_round_shift(s0 + s3), bd);
- output[1] = WRAPLOW(dct_const_round_shift(s1 + s3), bd);
- output[2] = WRAPLOW(dct_const_round_shift(s2), bd);
- output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3), bd);
+ output[0] = WRAPLOW(highbd_dct_const_round_shift(s0 + s3, bd), bd);
+ output[1] = WRAPLOW(highbd_dct_const_round_shift(s1 + s3, bd), bd);
+ output[2] = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd);
+ output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd);
}
void vp9_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
- x0 = WRAPLOW(dct_const_round_shift(s0 + s4), bd);
- x1 = WRAPLOW(dct_const_round_shift(s1 + s5), bd);
- x2 = WRAPLOW(dct_const_round_shift(s2 + s6), bd);
- x3 = WRAPLOW(dct_const_round_shift(s3 + s7), bd);
- x4 = WRAPLOW(dct_const_round_shift(s0 - s4), bd);
- x5 = WRAPLOW(dct_const_round_shift(s1 - s5), bd);
- x6 = WRAPLOW(dct_const_round_shift(s2 - s6), bd);
- x7 = WRAPLOW(dct_const_round_shift(s3 - s7), bd);
+ x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s4, bd), bd);
+ x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s5, bd), bd);
+ x2 = WRAPLOW(highbd_dct_const_round_shift(s2 + s6, bd), bd);
+ x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s7, bd), bd);
+ x4 = WRAPLOW(highbd_dct_const_round_shift(s0 - s4, bd), bd);
+ x5 = WRAPLOW(highbd_dct_const_round_shift(s1 - s5, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s2 - s6, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s3 - s7, bd), bd);
// stage 2
s0 = x0;
x1 = WRAPLOW(s1 + s3, bd);
x2 = WRAPLOW(s0 - s2, bd);
x3 = WRAPLOW(s1 - s3, bd);
- x4 = WRAPLOW(dct_const_round_shift(s4 + s6), bd);
- x5 = WRAPLOW(dct_const_round_shift(s5 + s7), bd);
- x6 = WRAPLOW(dct_const_round_shift(s4 - s6), bd);
- x7 = WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+ x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd);
+ x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd);
// stage 3
s2 = cospi_16_64 * (x2 + x3);
s6 = cospi_16_64 * (x6 + x7);
s7 = cospi_16_64 * (x6 - x7);
- x2 = WRAPLOW(dct_const_round_shift(s2), bd);
- x3 = WRAPLOW(dct_const_round_shift(s3), bd);
- x6 = WRAPLOW(dct_const_round_shift(s6), bd);
- x7 = WRAPLOW(dct_const_round_shift(s7), bd);
+ x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd);
+ x3 = WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd);
output[0] = WRAPLOW(x0, bd);
output[1] = WRAPLOW(-x4, bd);
temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
- step2[8] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[15] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
- step2[9] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
- step2[11] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
// stage 3
step1[0] = step2[0];
temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
- step1[4] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[8] = WRAPLOW(step2[8] + step2[9], bd);
step1[9] = WRAPLOW(step2[8] - step2[9], bd);
// stage 4
temp1 = (step1[0] + step1[1]) * cospi_16_64;
temp2 = (step1[0] - step1[1]) * cospi_16_64;
- step2[0] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[1] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
- step2[2] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[3] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[4] = WRAPLOW(step1[4] + step1[5], bd);
step2[5] = WRAPLOW(step1[4] - step1[5], bd);
step2[6] = WRAPLOW(-step1[6] + step1[7], bd);
step2[15] = step1[15];
temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
- step2[9] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[11] = step1[11];
step2[12] = step1[12];
step1[4] = step2[4];
temp1 = (step2[6] - step2[5]) * cospi_16_64;
temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[7] = step2[7];
step1[8] = WRAPLOW(step2[8] + step2[11], bd);
step2[9] = step1[9];
temp1 = (-step1[10] + step1[13]) * cospi_16_64;
temp2 = (step1[10] + step1[13]) * cospi_16_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = (-step1[11] + step1[12]) * cospi_16_64;
temp2 = (step1[11] + step1[12]) * cospi_16_64;
- step2[11] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[14] = step1[14];
step2[15] = step1[15];
s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
- x0 = WRAPLOW(dct_const_round_shift(s0 + s8), bd);
- x1 = WRAPLOW(dct_const_round_shift(s1 + s9), bd);
- x2 = WRAPLOW(dct_const_round_shift(s2 + s10), bd);
- x3 = WRAPLOW(dct_const_round_shift(s3 + s11), bd);
- x4 = WRAPLOW(dct_const_round_shift(s4 + s12), bd);
- x5 = WRAPLOW(dct_const_round_shift(s5 + s13), bd);
- x6 = WRAPLOW(dct_const_round_shift(s6 + s14), bd);
- x7 = WRAPLOW(dct_const_round_shift(s7 + s15), bd);
- x8 = WRAPLOW(dct_const_round_shift(s0 - s8), bd);
- x9 = WRAPLOW(dct_const_round_shift(s1 - s9), bd);
- x10 = WRAPLOW(dct_const_round_shift(s2 - s10), bd);
- x11 = WRAPLOW(dct_const_round_shift(s3 - s11), bd);
- x12 = WRAPLOW(dct_const_round_shift(s4 - s12), bd);
- x13 = WRAPLOW(dct_const_round_shift(s5 - s13), bd);
- x14 = WRAPLOW(dct_const_round_shift(s6 - s14), bd);
- x15 = WRAPLOW(dct_const_round_shift(s7 - s15), bd);
+ x0 = WRAPLOW(highbd_dct_const_round_shift(s0 + s8, bd), bd);
+ x1 = WRAPLOW(highbd_dct_const_round_shift(s1 + s9, bd), bd);
+ x2 = WRAPLOW(highbd_dct_const_round_shift(s2 + s10, bd), bd);
+ x3 = WRAPLOW(highbd_dct_const_round_shift(s3 + s11, bd), bd);
+ x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s12, bd), bd);
+ x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s13, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s6 + s14, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s7 + s15, bd), bd);
+ x8 = WRAPLOW(highbd_dct_const_round_shift(s0 - s8, bd), bd);
+ x9 = WRAPLOW(highbd_dct_const_round_shift(s1 - s9, bd), bd);
+ x10 = WRAPLOW(highbd_dct_const_round_shift(s2 - s10, bd), bd);
+ x11 = WRAPLOW(highbd_dct_const_round_shift(s3 - s11, bd), bd);
+ x12 = WRAPLOW(highbd_dct_const_round_shift(s4 - s12, bd), bd);
+ x13 = WRAPLOW(highbd_dct_const_round_shift(s5 - s13, bd), bd);
+ x14 = WRAPLOW(highbd_dct_const_round_shift(s6 - s14, bd), bd);
+ x15 = WRAPLOW(highbd_dct_const_round_shift(s7 - s15, bd), bd);
// stage 2
s0 = x0;
x5 = WRAPLOW(s1 - s5, bd);
x6 = WRAPLOW(s2 - s6, bd);
x7 = WRAPLOW(s3 - s7, bd);
- x8 = WRAPLOW(dct_const_round_shift(s8 + s12), bd);
- x9 = WRAPLOW(dct_const_round_shift(s9 + s13), bd);
- x10 = WRAPLOW(dct_const_round_shift(s10 + s14), bd);
- x11 = WRAPLOW(dct_const_round_shift(s11 + s15), bd);
- x12 = WRAPLOW(dct_const_round_shift(s8 - s12), bd);
- x13 = WRAPLOW(dct_const_round_shift(s9 - s13), bd);
- x14 = WRAPLOW(dct_const_round_shift(s10 - s14), bd);
- x15 = WRAPLOW(dct_const_round_shift(s11 - s15), bd);
+ x8 = WRAPLOW(highbd_dct_const_round_shift(s8 + s12, bd), bd);
+ x9 = WRAPLOW(highbd_dct_const_round_shift(s9 + s13, bd), bd);
+ x10 = WRAPLOW(highbd_dct_const_round_shift(s10 + s14, bd), bd);
+ x11 = WRAPLOW(highbd_dct_const_round_shift(s11 + s15, bd), bd);
+ x12 = WRAPLOW(highbd_dct_const_round_shift(s8 - s12, bd), bd);
+ x13 = WRAPLOW(highbd_dct_const_round_shift(s9 - s13, bd), bd);
+ x14 = WRAPLOW(highbd_dct_const_round_shift(s10 - s14, bd), bd);
+ x15 = WRAPLOW(highbd_dct_const_round_shift(s11 - s15, bd), bd);
// stage 3
s0 = x0;
x1 = WRAPLOW(s1 + s3, bd);
x2 = WRAPLOW(s0 - s2, bd);
x3 = WRAPLOW(s1 - s3, bd);
- x4 = WRAPLOW(dct_const_round_shift(s4 + s6), bd);
- x5 = WRAPLOW(dct_const_round_shift(s5 + s7), bd);
- x6 = WRAPLOW(dct_const_round_shift(s4 - s6), bd);
- x7 = WRAPLOW(dct_const_round_shift(s5 - s7), bd);
+ x4 = WRAPLOW(highbd_dct_const_round_shift(s4 + s6, bd), bd);
+ x5 = WRAPLOW(highbd_dct_const_round_shift(s5 + s7, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s4 - s6, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s5 - s7, bd), bd);
x8 = WRAPLOW(s8 + s10, bd);
x9 = WRAPLOW(s9 + s11, bd);
x10 = WRAPLOW(s8 - s10, bd);
x11 = WRAPLOW(s9 - s11, bd);
- x12 = WRAPLOW(dct_const_round_shift(s12 + s14), bd);
- x13 = WRAPLOW(dct_const_round_shift(s13 + s15), bd);
- x14 = WRAPLOW(dct_const_round_shift(s12 - s14), bd);
- x15 = WRAPLOW(dct_const_round_shift(s13 - s15), bd);
+ x12 = WRAPLOW(highbd_dct_const_round_shift(s12 + s14, bd), bd);
+ x13 = WRAPLOW(highbd_dct_const_round_shift(s13 + s15, bd), bd);
+ x14 = WRAPLOW(highbd_dct_const_round_shift(s12 - s14, bd), bd);
+ x15 = WRAPLOW(highbd_dct_const_round_shift(s13 - s15, bd), bd);
// stage 4
s2 = (- cospi_16_64) * (x2 + x3);
s14 = (- cospi_16_64) * (x14 + x15);
s15 = cospi_16_64 * (x14 - x15);
- x2 = WRAPLOW(dct_const_round_shift(s2), bd);
- x3 = WRAPLOW(dct_const_round_shift(s3), bd);
- x6 = WRAPLOW(dct_const_round_shift(s6), bd);
- x7 = WRAPLOW(dct_const_round_shift(s7), bd);
- x10 = WRAPLOW(dct_const_round_shift(s10), bd);
- x11 = WRAPLOW(dct_const_round_shift(s11), bd);
- x14 = WRAPLOW(dct_const_round_shift(s14), bd);
- x15 = WRAPLOW(dct_const_round_shift(s15), bd);
+ x2 = WRAPLOW(highbd_dct_const_round_shift(s2, bd), bd);
+ x3 = WRAPLOW(highbd_dct_const_round_shift(s3, bd), bd);
+ x6 = WRAPLOW(highbd_dct_const_round_shift(s6, bd), bd);
+ x7 = WRAPLOW(highbd_dct_const_round_shift(s7, bd), bd);
+ x10 = WRAPLOW(highbd_dct_const_round_shift(s10, bd), bd);
+ x11 = WRAPLOW(highbd_dct_const_round_shift(s11, bd), bd);
+ x14 = WRAPLOW(highbd_dct_const_round_shift(s14, bd), bd);
+ x15 = WRAPLOW(highbd_dct_const_round_shift(s15, bd), bd);
output[0] = WRAPLOW(x0, bd);
output[1] = WRAPLOW(-x8, bd);
int stride, int bd) {
int i, j;
tran_high_t a1;
- tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+ tran_low_t out = WRAPLOW(
+ highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+ out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 16; ++j) {
for (i = 0; i < 16; ++i)
temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
- step1[16] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[31] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[16] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[31] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
- step1[17] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[30] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
- step1[18] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
- step1[19] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[28] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
- step1[20] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
- step1[21] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
- step1[22] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
- step1[23] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[24] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
// stage 2
step2[0] = step1[0];
temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
- step2[8] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[15] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[8] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[15] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
- step2[9] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
- step2[11] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[16] = WRAPLOW(step1[16] + step1[17], bd);
step2[17] = WRAPLOW(step1[16] - step1[17], bd);
temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
- step1[4] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[7] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[4] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[7] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[8] = WRAPLOW(step2[8] + step2[9], bd);
step1[9] = WRAPLOW(step2[8] - step2[9], bd);
step1[31] = step2[31];
temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
- step1[17] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[30] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[17] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[30] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
- step1[18] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[19] = step2[19];
step1[20] = step2[20];
temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
- step1[21] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
- step1[22] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[23] = step2[23];
step1[24] = step2[24];
step1[27] = step2[27];
// stage 4
temp1 = (step1[0] + step1[1]) * cospi_16_64;
temp2 = (step1[0] - step1[1]) * cospi_16_64;
- step2[0] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[1] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[0] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[1] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
- step2[2] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[3] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[2] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[3] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[4] = WRAPLOW(step1[4] + step1[5], bd);
step2[5] = WRAPLOW(step1[4] - step1[5], bd);
step2[6] = WRAPLOW(-step1[6] + step1[7], bd);
step2[15] = step1[15];
temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
- step2[9] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[14] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[9] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[14] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[11] = step1[11];
step2[12] = step1[12];
step1[4] = step2[4];
temp1 = (step2[6] - step2[5]) * cospi_16_64;
temp2 = (step2[5] + step2[6]) * cospi_16_64;
- step1[5] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[6] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[5] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[7] = step2[7];
step1[8] = WRAPLOW(step2[8] + step2[11], bd);
step1[17] = step2[17];
temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
- step1[18] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[29] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[18] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[29] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
- step1[19] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[28] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[19] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[28] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
- step1[20] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
- step1[21] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[22] = step2[22];
step1[23] = step2[23];
step1[24] = step2[24];
step2[9] = step1[9];
temp1 = (-step1[10] + step1[13]) * cospi_16_64;
temp2 = (step1[10] + step1[13]) * cospi_16_64;
- step2[10] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[13] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[10] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[13] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = (-step1[11] + step1[12]) * cospi_16_64;
temp2 = (step1[11] + step1[12]) * cospi_16_64;
- step2[11] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step2[12] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step2[11] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step2[12] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step2[14] = step1[14];
step2[15] = step1[15];
step1[19] = step2[19];
temp1 = (-step2[20] + step2[27]) * cospi_16_64;
temp2 = (step2[20] + step2[27]) * cospi_16_64;
- step1[20] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[27] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[20] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[27] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = (-step2[21] + step2[26]) * cospi_16_64;
temp2 = (step2[21] + step2[26]) * cospi_16_64;
- step1[21] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[26] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[21] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[26] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = (-step2[22] + step2[25]) * cospi_16_64;
temp2 = (step2[22] + step2[25]) * cospi_16_64;
- step1[22] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[25] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[22] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[25] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
temp1 = (-step2[23] + step2[24]) * cospi_16_64;
temp2 = (step2[23] + step2[24]) * cospi_16_64;
- step1[23] = WRAPLOW(dct_const_round_shift(temp1), bd);
- step1[24] = WRAPLOW(dct_const_round_shift(temp2), bd);
+ step1[23] = WRAPLOW(highbd_dct_const_round_shift(temp1, bd), bd);
+ step1[24] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
step1[28] = step2[28];
step1[29] = step2[29];
step1[30] = step2[30];
int a1;
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
- tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
- out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+ tran_low_t out = WRAPLOW(
+ highbd_dct_const_round_shift(input[0] * cospi_16_64, bd), bd);
+ out = WRAPLOW(highbd_dct_const_round_shift(out * cospi_16_64, bd), bd);
a1 = ROUND_POWER_OF_TWO(out, 6);
for (j = 0; j < 32; ++j) {
static const tran_high_t sinpi_4_9 = 15212;
static INLINE tran_low_t check_range(tran_high_t input) {
-#if CONFIG_VP9_HIGHBITDEPTH
- // For valid highbitdepth VP9 streams, intermediate stage coefficients will
- // stay within the ranges:
- // - 8 bit: signed 16 bit integer
- // - 10 bit: signed 18 bit integer
- // - 12 bit: signed 20 bit integer
-#elif CONFIG_COEFFICIENT_RANGE_CHECKING
+#if CONFIG_COEFFICIENT_RANGE_CHECKING
// For valid VP9 input streams, intermediate stage coefficients should always
// stay within the range of a signed 16 bit integer. Coefficients can go out
// of this range for invalid/corrupt VP9 streams. However, strictly checking
// --enable-coefficient-range-checking.
assert(INT16_MIN <= input);
assert(input <= INT16_MAX);
-#endif
+#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
return (tran_low_t)input;
}
return check_range(rv);
}
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE tran_low_t highbd_check_range(tran_high_t input,
+ int bd) {
+#if CONFIG_COEFFICIENT_RANGE_CHECKING
+ // For valid highbitdepth VP9 streams, intermediate stage coefficients will
+ // stay within the ranges:
+ // - 8 bit: signed 16 bit integer
+ // - 10 bit: signed 18 bit integer
+ // - 12 bit: signed 20 bit integer
+ const int32_t int_max = (1 << (7 + bd)) - 1;
+ const int32_t int_min = -int_max - 1;
+ assert(int_min <= input);
+ assert(input <= int_max);
+ (void) int_min;
+#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
+ (void) bd;
+ return (tran_low_t)input;
+}
+
+static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input,
+ int bd) {
+ tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
+ return highbd_check_range(rv, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
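// Worked example (not part of the patch) for highbd_check_range() above:
// int_max = (1 << (7 + bd)) - 1, so
//   bd = 8  -> int_max =  32767 (the signed 16-bit range),
//   bd = 10 -> int_max = 131071 (the signed 18-bit range),
//   bd = 12 -> int_max = 524287 (the signed 20-bit range),
// matching the per-bit-depth bounds listed in the comment inside that helper.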
+
typedef void (*transform_1d)(const tran_low_t*, tran_low_t*);
typedef struct {
}
#endif // CONFIG_VP9_HIGHBITDEPTH
-static void filter_block_plane_non420(VP9_COMMON *cm,
- struct macroblockd_plane *plane,
- MODE_INFO *mi_8x8,
- int mi_row, int mi_col) {
+void vp9_filter_block_plane_non420(VP9_COMMON *cm,
+ struct macroblockd_plane *plane,
+ MODE_INFO *mi_8x8,
+ int mi_row, int mi_col) {
const int ss_x = plane->subsampling_x;
const int ss_y = plane->subsampling_y;
const int row_step = 1 << ss_y;
if (use_420)
vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
else
- filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
- mi_row, mi_col);
+ vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ mi_row, mi_col);
}
}
}
int mi_row,
LOOP_FILTER_MASK *lfm);
+void vp9_filter_block_plane_non420(struct VP9Common *cm,
+ struct macroblockd_plane *plane,
+ MODE_INFO *mi_8x8,
+ int mi_row, int mi_col);
+
void vp9_loop_filter_init(struct VP9Common *cm);
// Update the loop filter for the current frame.
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "./vpx_config.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vp9/common/vp9_loopfilter_thread.h"
+#include "vp9/common/vp9_reconinter.h"
+
+#if CONFIG_MULTITHREAD
+static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
+ const int kMaxTryLocks = 4000;
+ int locked = 0;
+ int i;
+
+ for (i = 0; i < kMaxTryLocks; ++i) {
+ if (!pthread_mutex_trylock(mutex)) {
+ locked = 1;
+ break;
+ }
+ }
+
+ if (!locked)
+ pthread_mutex_lock(mutex);
+}
+#endif // CONFIG_MULTITHREAD
+
+static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) {
+#if CONFIG_MULTITHREAD
+ const int nsync = lf_sync->sync_range;
+
+ if (r && !(c & (nsync - 1))) {
+ pthread_mutex_t *const mutex = &lf_sync->mutex_[r - 1];
+ mutex_lock(mutex);
+
+ while (c > lf_sync->cur_sb_col[r - 1] - nsync) {
+ pthread_cond_wait(&lf_sync->cond_[r - 1], mutex);
+ }
+ pthread_mutex_unlock(mutex);
+ }
+#else
+ (void)lf_sync;
+ (void)r;
+ (void)c;
+#endif // CONFIG_MULTITHREAD
+}
+
+static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c,
+ const int sb_cols) {
+#if CONFIG_MULTITHREAD
+ const int nsync = lf_sync->sync_range;
+ int cur;
+  // Only signal when enough superblocks have been filtered for the next row
+  // to run.
+ int sig = 1;
+
+ if (c < sb_cols - 1) {
+ cur = c;
+ if (c % nsync)
+ sig = 0;
+ } else {
+ cur = sb_cols + nsync;
+ }
+
+ if (sig) {
+ mutex_lock(&lf_sync->mutex_[r]);
+
+ lf_sync->cur_sb_col[r] = cur;
+
+ pthread_cond_signal(&lf_sync->cond_[r]);
+ pthread_mutex_unlock(&lf_sync->mutex_[r]);
+ }
+#else
+ (void)lf_sync;
+ (void)r;
+ (void)c;
+ (void)sb_cols;
+#endif // CONFIG_MULTITHREAD
+}
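// Worked example (not part of the patch) of the row-sync protocol above,
// assuming sync_range (nsync) == 4. Before filtering superblock column c of
// row r, sync_read() blocks only at columns where (c & 3) == 0, and then
// waits until cur_sb_col[r - 1] >= c + 4, i.e. until the row above is at
// least one sync_range ahead. sync_write() takes the mutex and signals only
// at every 4th column, and after the last column of a row it publishes
// cur_sb_col[r] = sb_cols + 4 so waiters on the next row can always drain
// past the end of the row.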
+
+// Implement row loopfiltering for each thread.
+static INLINE
+void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
+ VP9_COMMON *const cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int start, int stop, int y_only,
+ VP9LfSync *const lf_sync) {
+ const int num_planes = y_only ? 1 : MAX_MB_PLANE;
+ const int use_420 = y_only || (planes[1].subsampling_y == 1 &&
+ planes[1].subsampling_x == 1);
+ const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
+ int mi_row, mi_col;
+
+ for (mi_row = start; mi_row < stop;
+ mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
+ MODE_INFO *const mi = cm->mi + mi_row * cm->mi_stride;
+
+ for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
+ const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
+ const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
+ LOOP_FILTER_MASK lfm;
+ int plane;
+
+ sync_read(lf_sync, r, c);
+
+ vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+
+ // TODO(JBB): Make setup_mask work for non 420.
+ if (use_420)
+ vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride,
+ &lfm);
+
+ for (plane = 0; plane < num_planes; ++plane) {
+ if (use_420)
+ vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
+ else
+ vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+ mi_row, mi_col);
+ }
+
+ sync_write(lf_sync, r, c, sb_cols);
+ }
+ }
+}
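// Worked example (not part of the patch): with lf_sync->num_workers == 2 and
// MI_BLOCK_SIZE == 8 (one 64x64 superblock row), the worker launched with
// start == 0 filters mi rows 0, 16, 32, ... and the worker launched with
// start == 8 filters rows 8, 24, 40, ...; each iteration advances by
// num_workers * MI_BLOCK_SIZE, so superblock rows are interleaved across
// workers rather than split into contiguous bands, keeping the row-to-row
// dependency window handled by sync_read()/sync_write() small.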
+
+// Row-based multi-threaded loopfilter hook
+static int loop_filter_row_worker(VP9LfSync *const lf_sync,
+ LFWorkerData *const lf_data) {
+ thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+ lf_data->start, lf_data->stop, lf_data->y_only,
+ lf_sync);
+ return 1;
+}
+
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame,
+ VP9_COMMON *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int start, int stop, int y_only,
+ VP9Worker *workers, int nworkers,
+ VP9LfSync *lf_sync) {
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+  // Number of superblock rows
+ const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
+  // The decoder may allocate more threads than the number of tile columns,
+  // based on the user's input.
+ const int tile_cols = 1 << cm->log2_tile_cols;
+ const int num_workers = MIN(nworkers, tile_cols);
+ int i;
+
+ if (!lf_sync->sync_range || cm->last_height != cm->height ||
+ num_workers > lf_sync->num_workers) {
+ vp9_loop_filter_dealloc(lf_sync);
+ vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+ }
+
+ // Initialize cur_sb_col to -1 for all SB rows.
+ vpx_memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
+
+  // Set up loopfilter thread data.
+  // num_workers is capped because using more loopfilter threads than there
+  // are cores has been observed to hurt performance on Android: the system
+  // only schedules the tile decode workers on as many cores as there are
+  // tile columns, so any extra loopfilter threads just contend with them.
+  // If the multithreading code changes in the future, the number of workers
+  // used by the loopfilter should be revisited.
+ for (i = 0; i < num_workers; ++i) {
+ VP9Worker *const worker = &workers[i];
+ LFWorkerData *const lf_data = &lf_sync->lfdata[i];
+
+ worker->hook = (VP9WorkerHook)loop_filter_row_worker;
+ worker->data1 = lf_sync;
+ worker->data2 = lf_data;
+
+ // Loopfilter data
+ vp9_loop_filter_data_reset(lf_data, frame, cm, planes);
+ lf_data->start = start + i * MI_BLOCK_SIZE;
+ lf_data->stop = stop;
+ lf_data->y_only = y_only;
+
+ // Start loopfiltering
+ if (i == num_workers - 1) {
+ winterface->execute(worker);
+ } else {
+ winterface->launch(worker);
+ }
+ }
+
+ // Wait till all rows are finished
+ for (i = 0; i < num_workers; ++i) {
+ winterface->sync(&workers[i]);
+ }
+}
+
+void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
+ VP9_COMMON *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int frame_filter_level,
+ int y_only, int partial_frame,
+ VP9Worker *workers, int num_workers,
+ VP9LfSync *lf_sync) {
+ int start_mi_row, end_mi_row, mi_rows_to_filter;
+
+ if (!frame_filter_level) return;
+
+ start_mi_row = 0;
+ mi_rows_to_filter = cm->mi_rows;
+ if (partial_frame && cm->mi_rows > 8) {
+ start_mi_row = cm->mi_rows >> 1;
+ start_mi_row &= 0xfffffff8;
+ mi_rows_to_filter = MAX(cm->mi_rows / 8, 8);
+ }
+ end_mi_row = start_mi_row + mi_rows_to_filter;
+ vp9_loop_filter_frame_init(cm, frame_filter_level);
+
+ loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row,
+ y_only, workers, num_workers, lf_sync);
+}
+
+// Set up nsync by width.
+static INLINE int get_sync_range(int width) {
+  // nsync values are chosen empirically. For example, for 4k
+  // video, a value of 4 gives the best performance.
+ if (width < 640)
+ return 1;
+ else if (width <= 1280)
+ return 2;
+ else if (width <= 4096)
+ return 4;
+ else
+ return 8;
+}
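// Note (not part of the patch): the values returned above must remain powers
// of two, because sync_read() gates on (c & (nsync - 1)) == 0; with
// nsync == 4, for example, a row only waits on the row above it at columns
// 0, 4, 8, ... rather than at every superblock.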
+
+// Allocate memory for lf row synchronization
+void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
+ int width, int num_workers) {
+ lf_sync->rows = rows;
+#if CONFIG_MULTITHREAD
+ {
+ int i;
+
+ CHECK_MEM_ERROR(cm, lf_sync->mutex_,
+ vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
+ if (lf_sync->mutex_) {
+ for (i = 0; i < rows; ++i) {
+ pthread_mutex_init(&lf_sync->mutex_[i], NULL);
+ }
+ }
+
+ CHECK_MEM_ERROR(cm, lf_sync->cond_,
+ vpx_malloc(sizeof(*lf_sync->cond_) * rows));
+ if (lf_sync->cond_) {
+ for (i = 0; i < rows; ++i) {
+ pthread_cond_init(&lf_sync->cond_[i], NULL);
+ }
+ }
+ }
+#endif // CONFIG_MULTITHREAD
+
+ CHECK_MEM_ERROR(cm, lf_sync->lfdata,
+ vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
+ lf_sync->num_workers = num_workers;
+
+ CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
+ vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
+
+ // Set up nsync.
+ lf_sync->sync_range = get_sync_range(width);
+}
+
+// Deallocate lf synchronization related mutex and data
+void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) {
+ if (lf_sync != NULL) {
+#if CONFIG_MULTITHREAD
+ int i;
+
+ if (lf_sync->mutex_ != NULL) {
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_mutex_destroy(&lf_sync->mutex_[i]);
+ }
+ vpx_free(lf_sync->mutex_);
+ }
+ if (lf_sync->cond_ != NULL) {
+ for (i = 0; i < lf_sync->rows; ++i) {
+ pthread_cond_destroy(&lf_sync->cond_[i]);
+ }
+ vpx_free(lf_sync->cond_);
+ }
+#endif // CONFIG_MULTITHREAD
+ vpx_free(lf_sync->lfdata);
+ vpx_free(lf_sync->cur_sb_col);
+ // Clear the structure as the source of this call may be a resize, in which
+ // case this call will be followed by an _alloc() that may fail.
+ vp9_zero(*lf_sync);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef VP9_COMMON_VP9_LOOPFILTER_THREAD_H_
+#define VP9_COMMON_VP9_LOOPFILTER_THREAD_H_
+#include "./vpx_config.h"
+#include "vp9/common/vp9_loopfilter.h"
+#include "vp9/common/vp9_thread.h"
+
+struct VP9Common;
+
+// Loopfilter row synchronization
+typedef struct VP9LfSyncData {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t *mutex_;
+ pthread_cond_t *cond_;
+#endif
+ // Allocate memory to store the loop-filtered superblock index in each row.
+ int *cur_sb_col;
+ // The optimal sync_range for different resolutions and platforms should be
+ // determined by testing. Currently, it is chosen to be a power-of-2 number.
+ int sync_range;
+ int rows;
+
+ // Row-based parallel loopfilter data
+ LFWorkerData *lfdata;
+ int num_workers;
+} VP9LfSync;
+
+// Allocate memory for loopfilter row synchronization.
+void vp9_loop_filter_alloc(VP9LfSync *lf_sync, struct VP9Common *cm, int rows,
+ int width, int num_workers);
+
+// Deallocate loopfilter synchronization related mutex and data.
+void vp9_loop_filter_dealloc(VP9LfSync *lf_sync);
+
+// Multi-threaded loopfilter that uses the tile threads.
+void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
+ struct VP9Common *cm,
+ struct macroblockd_plane planes[MAX_MB_PLANE],
+ int frame_filter_level,
+ int y_only, int partial_frame,
+ VP9Worker *workers, int num_workers,
+ VP9LfSync *lf_sync);
+
+#endif // VP9_COMMON_VP9_LOOPFILTER_THREAD_H_
}
}
+void vp9_filter_by_weight8x8_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride, int src_weight) {
+ filter_by_weight(src, src_stride, dst, dst_stride, 8, src_weight);
+}
+
+void vp9_filter_by_weight16x16_c(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ int src_weight) {
+ filter_by_weight(src, src_stride, dst, dst_stride, 16, src_weight);
+}
+
static void filter_by_weight32x32(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride, int weight) {
- filter_by_weight(src, src_stride, dst, dst_stride, 16, weight);
- filter_by_weight(src + 16, src_stride, dst + 16, dst_stride, 16, weight);
- filter_by_weight(src + src_stride * 16, src_stride, dst + dst_stride * 16,
- dst_stride, 16, weight);
- filter_by_weight(src + src_stride * 16 + 16, src_stride,
- dst + dst_stride * 16 + 16, dst_stride, 16, weight);
+ vp9_filter_by_weight16x16(src, src_stride, dst, dst_stride, weight);
+ vp9_filter_by_weight16x16(src + 16, src_stride, dst + 16, dst_stride,
+ weight);
+ vp9_filter_by_weight16x16(src + src_stride * 16, src_stride,
+ dst + dst_stride * 16, dst_stride, weight);
+ vp9_filter_by_weight16x16(src + src_stride * 16 + 16, src_stride,
+ dst + dst_stride * 16 + 16, dst_stride, weight);
}
static void filter_by_weight64x64(const uint8_t *src, int src_stride,
int uvd_stride, BLOCK_SIZE block_size,
int weight) {
if (block_size == BLOCK_16X16) {
- filter_by_weight(y, y_stride, yd, yd_stride, 16, weight);
- filter_by_weight(u, uv_stride, ud, uvd_stride, 8, weight);
- filter_by_weight(v, uv_stride, vd, uvd_stride, 8, weight);
+ vp9_filter_by_weight16x16(y, y_stride, yd, yd_stride, weight);
+ vp9_filter_by_weight8x8(u, uv_stride, ud, uvd_stride, weight);
+ vp9_filter_by_weight8x8(v, uv_stride, vd, uvd_stride, weight);
} else if (block_size == BLOCK_32X32) {
filter_by_weight32x32(y, y_stride, yd, yd_stride, weight);
- filter_by_weight(u, uv_stride, ud, uvd_stride, 16, weight);
- filter_by_weight(v, uv_stride, vd, uvd_stride, 16, weight);
+ vp9_filter_by_weight16x16(u, uv_stride, ud, uvd_stride, weight);
+ vp9_filter_by_weight16x16(v, uv_stride, vd, uvd_stride, weight);
} else if (block_size == BLOCK_64X64) {
filter_by_weight64x64(y, y_stride, yd, yd_stride, weight);
filter_by_weight32x32(u, uv_stride, ud, uvd_stride, weight);
}
}
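+// Compute the SAD and variance-difference thresholds used by mfqe_block()
+// below. Larger blocks get a slightly lower SAD threshold, and both
+// thresholds grow with the quantizer difference between the current and the
+// previous frame.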
+static void get_thr(BLOCK_SIZE bs, int qdiff, int *sad_thr, int *vdiff_thr) {
+ const int adj = qdiff >> MFQE_PRECISION;
+ if (bs == BLOCK_16X16) {
+ *sad_thr = 7 + adj;
+ } else if (bs == BLOCK_32X32) {
+ *sad_thr = 6 + adj;
+ } else { // BLOCK_64X64
+ *sad_thr = 5 + adj;
+ }
+ *vdiff_thr = 125 + qdiff;
+}
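+// Illustrative values (assuming MFQE_PRECISION == 4, so adj == qdiff >> 4):
+// qdiff == 32 gives adj == 2, hence sad_thr == 9/8/7 for 16x16/32x32/64x64
+// blocks and vdiff_thr == 157.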
+
static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u,
const uint8_t *v, int y_stride, int uv_stride,
- uint8_t *yd, uint8_t *ud, uint8_t *vd,
- int yd_stride, int uvd_stride) {
- int sad, sad_thr, vdiff;
+ uint8_t *yd, uint8_t *ud, uint8_t *vd, int yd_stride,
+ int uvd_stride, int qdiff) {
+ int sad, sad_thr, vdiff, vdiff_thr;
uint32_t sse;
+ get_thr(bs, qdiff, &sad_thr, &vdiff_thr);
+
if (bs == BLOCK_16X16) {
vdiff = (vp9_variance16x16(y, y_stride, yd, yd_stride, &sse) + 128) >> 8;
sad = (vp9_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
sad = (vp9_sad64x64(y, y_stride, yd, yd_stride) + 2048) >> 12;
}
- if (bs == BLOCK_16X16) {
- sad_thr = 8;
- } else if (bs == BLOCK_32X32) {
- sad_thr = 7;
- } else { // BLOCK_64X64
- sad_thr = 6;
- }
-
- // TODO(jackychen): More experiments and remove magic numbers.
// vdiff > sad * 3 means vdiff should not be too small; otherwise, it might
// be a lighting change in a smooth area. When there is a lighting change in
// a smooth area, it is dangerous to do MFQE.
- if (sad > 1 && sad < sad_thr && vdiff > sad * 3 && vdiff < 150) {
- // TODO(jackychen): Add weighted average in the calculation.
- // Currently, the data is copied from last frame without averaging.
- apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride,
- ud, vd, uvd_stride, bs, 0);
+ if (sad > 1 && vdiff > sad * 3) {
+ const int weight = 1 << MFQE_PRECISION;
+ int ifactor = weight * sad * vdiff / (sad_thr * vdiff_thr);
+ // When ifactor equals weight, no MFQE is done.
+ if (ifactor > weight) {
+ ifactor = weight;
+ }
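+ // Illustration (assuming MFQE_PRECISION == 4, so weight == 16): a 16x16
+ // block with qdiff == 0 has sad_thr == 7 and vdiff_thr == 125; sad == 4
+ // and vdiff == 100 then give ifactor = 16 * 4 * 100 / (7 * 125) == 7,
+ // partway between 0 (take the previous frame) and weight (no MFQE).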
+ apply_ifactor(y, y_stride, yd, yd_stride, u, v, uv_stride, ud, vd,
+ uvd_stride, bs, ifactor);
} else {
// Copy the block from current frame (i.e., no mfqe is done).
copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd,
int yd_stride, int uvd_stride) {
int mi_offset, y_offset, uv_offset;
const BLOCK_SIZE cur_bs = mi->mbmi.sb_type;
- // TODO(jackychen): Consider how and whether to use qdiff in MFQE.
- // int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex;
+ const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex;
const int bsl = b_width_log2_lookup[bs];
PARTITION_TYPE partition = partition_lookup[bsl][cur_bs];
const BLOCK_SIZE subsize = get_subsize(bs, partition);
if (mfqe_decision(mi, mfqe_bs)) {
// Do mfqe on the first square partition.
mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride,
- yd, ud, vd, yd_stride, uvd_stride);
+ yd, ud, vd, yd_stride, uvd_stride, qdiff);
// Do mfqe on the second square partition.
mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset,
y_stride, uv_stride, yd + y_offset, ud + uv_offset,
- vd + uv_offset, yd_stride, uvd_stride);
+ vd + uv_offset, yd_stride, uvd_stride, qdiff);
}
if (mfqe_decision(mi + mi_offset * cm->mi_stride, mfqe_bs)) {
// Do mfqe on the first square partition.
mfqe_block(bs_tmp, y + y_offset * y_stride, u + uv_offset * uv_stride,
v + uv_offset * uv_stride, y_stride, uv_stride,
yd + y_offset * yd_stride, ud + uv_offset * uvd_stride,
- vd + uv_offset * uvd_stride, yd_stride, uvd_stride);
+ vd + uv_offset * uvd_stride, yd_stride, uvd_stride, qdiff);
// Do mfqe on the second square partition.
mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset,
u + uv_offset * uv_stride + uv_offset,
uv_stride, yd + y_offset * yd_stride + y_offset,
ud + uv_offset * uvd_stride + uv_offset,
vd + uv_offset * uvd_stride + uv_offset,
- yd_stride, uvd_stride);
+ yd_stride, uvd_stride, qdiff);
}
break;
case PARTITION_VERT:
if (mfqe_decision(mi, mfqe_bs)) {
// Do mfqe on the first square partition.
mfqe_block(bs_tmp, y, u, v, y_stride, uv_stride,
- yd, ud, vd, yd_stride, uvd_stride);
+ yd, ud, vd, yd_stride, uvd_stride, qdiff);
// Do mfqe on the second square partition.
mfqe_block(bs_tmp, y + y_offset * y_stride, u + uv_offset * uv_stride,
v + uv_offset * uv_stride, y_stride, uv_stride,
yd + y_offset * yd_stride, ud + uv_offset * uvd_stride,
- vd + uv_offset * uvd_stride, yd_stride, uvd_stride);
+ vd + uv_offset * uvd_stride, yd_stride, uvd_stride, qdiff);
}
if (mfqe_decision(mi + mi_offset, mfqe_bs)) {
// Do mfqe on the first square partition.
mfqe_block(bs_tmp, y + y_offset, u + uv_offset, v + uv_offset,
y_stride, uv_stride, yd + y_offset, ud + uv_offset,
- vd + uv_offset, yd_stride, uvd_stride);
+ vd + uv_offset, yd_stride, uvd_stride, qdiff);
// Do mfqe on the second square partition.
mfqe_block(bs_tmp, y + y_offset * y_stride + y_offset,
u + uv_offset * uv_stride + uv_offset,
uv_stride, yd + y_offset * yd_stride + y_offset,
ud + uv_offset * uvd_stride + uv_offset,
vd + uv_offset * uvd_stride + uv_offset,
- yd_stride, uvd_stride);
+ yd_stride, uvd_stride, qdiff);
}
break;
case PARTITION_NONE:
if (mfqe_decision(mi, cur_bs)) {
// Do mfqe on this partition.
mfqe_block(cur_bs, y, u, v, y_stride, uv_stride,
- yd, ud, vd, yd_stride, uvd_stride);
+ yd, ud, vd, yd_stride, uvd_stride, qdiff);
} else {
// Copy the block from current frame (i.e., no mfqe is done).
copy_block(y, u, v, y_stride, uv_stride, yd, ud, vd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
- int block, int mi_row, int mi_col) {
+ int block, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
}
}
+ // Synchronize here for frame parallel decode if sync function is provided.
+ if (sync != NULL) {
+ sync(data, mi_row);
+ }
+
// Check the last frame's mode and mv info.
if (cm->use_prev_frame_mvs) {
if (prev_frame_mvs->ref_frame[0] == ref_frame) {
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
- int mi_row, int mi_col) {
+ int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data) {
find_mv_refs_idx(cm, xd, tile, mi, ref_frame, mv_ref_list, -1,
- mi_row, mi_col);
+ mi_row, mi_col, sync, data);
}
static void lower_mv_precision(MV *mv, int allow_hp) {
}
void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp,
- int_mv *mvlist, int_mv *nearest, int_mv *near) {
+ int_mv *mvlist, int_mv *nearest_mv,
+ int_mv *near_mv) {
int i;
// Make sure all the candidates are properly clamped etc
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i) {
lower_mv_precision(&mvlist[i].as_mv, allow_hp);
clamp_mv2(&mvlist[i].as_mv, xd);
}
- *nearest = mvlist[0];
- *near = mvlist[1];
+ *nearest_mv = mvlist[0];
+ *near_mv = mvlist[1];
}
void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *const tile,
int block, int ref, int mi_row, int mi_col,
- int_mv *nearest, int_mv *near) {
+ int_mv *nearest_mv, int_mv *near_mv) {
int_mv mv_list[MAX_MV_REF_CANDIDATES];
MODE_INFO *const mi = xd->mi[0].src_mi;
b_mode_info *bmi = mi->bmi;
assert(MAX_MV_REF_CANDIDATES == 2);
find_mv_refs_idx(cm, xd, tile, mi, mi->mbmi.ref_frame[ref], mv_list, block,
- mi_row, mi_col);
+ mi_row, mi_col, NULL, NULL);
- near->as_int = 0;
+ near_mv->as_int = 0;
switch (block) {
case 0:
- nearest->as_int = mv_list[0].as_int;
- near->as_int = mv_list[1].as_int;
+ nearest_mv->as_int = mv_list[0].as_int;
+ near_mv->as_int = mv_list[1].as_int;
break;
case 1:
case 2:
- nearest->as_int = bmi[0].as_mv[ref].as_int;
+ nearest_mv->as_int = bmi[0].as_mv[ref].as_int;
for (n = 0; n < MAX_MV_REF_CANDIDATES; ++n)
- if (nearest->as_int != mv_list[n].as_int) {
- near->as_int = mv_list[n].as_int;
+ if (nearest_mv->as_int != mv_list[n].as_int) {
+ near_mv->as_int = mv_list[n].as_int;
break;
}
break;
candidates[2] = mv_list[0];
candidates[3] = mv_list[1];
- nearest->as_int = bmi[2].as_mv[ref].as_int;
+ nearest_mv->as_int = bmi[2].as_mv[ref].as_int;
for (n = 0; n < 2 + MAX_MV_REF_CANDIDATES; ++n)
- if (nearest->as_int != candidates[n].as_int) {
- near->as_int = candidates[n].as_int;
+ if (nearest_mv->as_int != candidates[n].as_int) {
+ near_mv->as_int = candidates[n].as_int;
break;
}
break;
xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
}
+typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
- int_mv *mv_ref_list, int mi_row, int mi_col);
+ int_mv *mv_ref_list, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data);
// Check a list of motion vectors by sad score using a number of rows of pixels
// above and a number of cols of pixels to the left to select the one with the
// best score to use as the ref motion vector.
void vp9_find_best_ref_mvs(MACROBLOCKD *xd, int allow_hp,
- int_mv *mvlist, int_mv *nearest, int_mv *near);
+ int_mv *mvlist, int_mv *nearest_mv, int_mv *near_mv);
void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
const TileInfo *const tile,
int block, int ref, int mi_row, int mi_col,
- int_mv *nearest, int_mv *near);
+ int_mv *nearest_mv, int_mv *near_mv);
#ifdef __cplusplus
} // extern "C"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_frame_buffers.h"
#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_thread.h"
#include "vp9/common/vp9_tile_common.h"
#if CONFIG_VP9_POSTPROC
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
-// 1 scratch frame for the new frame, 3 for scaled references on the encoder
+// 4 scratch frames for the new frames to support a maximum of 4 cores decoding
+// in parallel, 3 for scaled references on the encoder.
+ // TODO(hkuang): Add on-demand frame buffers instead of hardcoding the number
+ // of frame buffers.
// TODO(jkoleszar): These 3 extra references could probably come from the
// normal reference pool.
-#define FRAME_BUFFERS (REF_FRAMES + 4)
+#define FRAME_BUFFERS (REF_FRAMES + 7)
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
+#define NUM_PING_PONG_BUFFERS 2
+
extern const struct {
PARTITION_CONTEXT above;
PARTITION_CONTEXT left;
int mi_cols;
vpx_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;
+
+ // The Following variables will only be used in frame parallel decode.
+
+ // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
+ // that no FrameWorker owns, or is decoding, this buffer.
+ VP9Worker *frame_worker_owner;
+
+ // row and col indicate the position up to which the frame has been decoded,
+ // in real pixel units. They are reset to -1 when decoding begins and set to
+ // INT_MAX when the frame is fully decoded.
+ int row;
+ int col;
} RefCntBuffer;
+typedef struct {
+ // Protect BufferPool from being accessed by several FrameWorkers at
+ // the same time during frame parallel decode.
+ // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
+#if CONFIG_MULTITHREAD
+ pthread_mutex_t pool_mutex;
+#endif
+
+ // Private data associated with the frame buffer callbacks.
+ void *cb_priv;
+
+ vpx_get_frame_buffer_cb_fn_t get_fb_cb;
+ vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+
+ RefCntBuffer frame_bufs[FRAME_BUFFERS];
+
+ // Frame buffers allocated internally by the codec.
+ InternalFrameBufferList int_frame_buffers;
+} BufferPool;
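+// lock_buffer_pool()/unlock_buffer_pool() (declared later in this header)
+// guard shared accesses such as the free-buffer scan in get_free_fb() below.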
+
typedef struct VP9Common {
struct vpx_internal_error_info error;
DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
- COLOR_SPACE color_space;
+ vpx_color_space_t color_space;
int width;
int height;
#endif
YV12_BUFFER_CONFIG *frame_to_show;
- RefCntBuffer frame_bufs[FRAME_BUFFERS];
RefCntBuffer *prev_frame;
// TODO(hkuang): Combine this with cur_buf in macroblockd.
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
+ // Prepare ref_frame_map for the next frame.
+ // Only used in frame parallel decode.
+ int next_ref_frame_map[REF_FRAMES];
+
// TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
// roll new_fb_idx into it.
int use_prev_frame_mvs;
// Persistent mb segment id map used in prediction.
- unsigned char *last_frame_seg_map;
+ int seg_map_idx;
+ int prev_seg_map_idx;
+
+ uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
+ uint8_t *last_frame_seg_map;
+ uint8_t *current_frame_seg_map;
INTERP_FILTER interp_filter;
struct loopfilter lf;
struct segmentation seg;
+ // TODO(hkuang): Remove this as it is the same as frame_parallel_decode
+ // in pbi.
+ int frame_parallel_decode; // frame-based threading.
+
// Context probabilities for reference frame prediction
MV_REFERENCE_FRAME comp_fixed_ref;
MV_REFERENCE_FRAME comp_var_ref[2];
// Handles memory for the codec.
InternalFrameBufferList int_frame_buffers;
+ // BufferPool passed in from outside the codec.
+ BufferPool *buffer_pool;
+
PARTITION_CONTEXT *above_seg_context;
ENTROPY_CONTEXT *above_context;
} VP9_COMMON;
+// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
+// frame reference count.
+void lock_buffer_pool(BufferPool *const pool);
+void unlock_buffer_pool(BufferPool *const pool);
+
static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP9_COMMON *cm, int index) {
if (index < 0 || index >= REF_FRAMES)
return NULL;
if (cm->ref_frame_map[index] < 0)
return NULL;
assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
- return &cm->frame_bufs[cm->ref_frame_map[index]].buf;
+ return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
}
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
- return &cm->frame_bufs[cm->new_fb_idx].buf;
+ return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
static INLINE int get_free_fb(VP9_COMMON *cm) {
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
- for (i = 0; i < FRAME_BUFFERS; i++)
- if (cm->frame_bufs[i].ref_count == 0)
+
+ lock_buffer_pool(cm->buffer_pool);
+ for (i = 0; i < FRAME_BUFFERS; ++i)
+ if (frame_bufs[i].ref_count == 0)
break;
assert(i < FRAME_BUFFERS);
- cm->frame_bufs[i].ref_count = 1;
+ frame_bufs[i].ref_count = 1;
+ unlock_buffer_pool(cm->buffer_pool);
return i;
}
xd->above_seg_context = cm->above_seg_context;
xd->mi_stride = cm->mi_stride;
+ xd->error_info = &cm->error;
}
static INLINE int frame_is_intra_only(const VP9_COMMON *const cm) {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
-
static unsigned int tree_merge_probs_impl(unsigned int i,
const vp9_tree_index *tree,
const vp9_prob *pre_probs,
const unsigned int *counts,
- unsigned int count_sat,
- unsigned int max_update,
vp9_prob *probs) {
const int l = tree[i];
const unsigned int left_count = (l <= 0)
? counts[-l]
- : tree_merge_probs_impl(l, tree, pre_probs, counts,
- count_sat, max_update, probs);
+ : tree_merge_probs_impl(l, tree, pre_probs, counts, probs);
const int r = tree[i + 1];
const unsigned int right_count = (r <= 0)
? counts[-r]
- : tree_merge_probs_impl(r, tree, pre_probs, counts,
- count_sat, max_update, probs);
+ : tree_merge_probs_impl(r, tree, pre_probs, counts, probs);
const unsigned int ct[2] = { left_count, right_count };
- probs[i >> 1] = merge_probs(pre_probs[i >> 1], ct,
- count_sat, max_update);
+ probs[i >> 1] = mode_mv_merge_probs(pre_probs[i >> 1], ct);
return left_count + right_count;
}
void vp9_tree_merge_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs,
- const unsigned int *counts, unsigned int count_sat,
- unsigned int max_update_factor, vp9_prob *probs) {
- tree_merge_probs_impl(0, tree, pre_probs, counts, count_sat,
- max_update_factor, probs);
+ const unsigned int *counts, vp9_prob *probs) {
+ tree_merge_probs_impl(0, tree, pre_probs, counts, probs);
}
#define vp9_complement(x) (255 - x)
+#define MODE_MV_COUNT_SAT 20
+
/* We build coding trees compactly in arrays.
Each node of the tree is a pair of vp9_tree_indices.
Array index often references a corresponding probability table.
return weighted_prob(pre_prob, prob, factor);
}
+// MODE_MV_MAX_UPDATE_FACTOR (128) * count / MODE_MV_COUNT_SAT;
+static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = {
+ 0, 6, 12, 19, 25, 32, 38, 44, 51, 57, 64,
+ 70, 76, 83, 89, 96, 102, 108, 115, 121, 128
+};
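+// For example, count == 10 maps to 128 * 10 / 20 == 64, i.e. a half-strength
+// update; counts of MODE_MV_COUNT_SAT or more saturate at 128.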
+
+static INLINE vp9_prob mode_mv_merge_probs(vp9_prob pre_prob,
+ const unsigned int ct[2]) {
+ const unsigned int den = ct[0] + ct[1];
+ if (den == 0) {
+ return pre_prob;
+ } else {
+ const unsigned int count = MIN(den, MODE_MV_COUNT_SAT);
+ const unsigned int factor = count_to_update_factor[count];
+ const vp9_prob prob =
+ clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den);
+ return weighted_prob(pre_prob, prob, factor);
+ }
+}
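+// Example: ct = {30, 10} gives den == 40, count saturates at 20, factor == 128,
+// and prob == (30 * 256 + 20) / 40 == 192, so pre_prob is blended toward 192
+// with weight factor / 256.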
+
void vp9_tree_merge_probs(const vp9_tree_index *tree, const vp9_prob *pre_probs,
- const unsigned int *counts, unsigned int count_sat,
- unsigned int max_update_factor, vp9_prob *probs);
+ const unsigned int *counts, vp9_prob *probs);
DECLARE_ALIGNED(16, extern const uint8_t, vp9_norm[256]);
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
-static void build_mc_border(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h, int w, int h) {
- // Get a pointer to the start of the real data for this row.
- const uint8_t *ref_row = src - x - y * src_stride;
-
- if (y >= h)
- ref_row += (h - 1) * src_stride;
- else if (y > 0)
- ref_row += y * src_stride;
-
- do {
- int right = 0, copy;
- int left = x < 0 ? -x : 0;
-
- if (left > b_w)
- left = b_w;
-
- if (x + b_w > w)
- right = x + b_w - w;
-
- if (right > b_w)
- right = b_w;
-
- copy = b_w - left - right;
-
- if (left)
- memset(dst, ref_row[0], left);
-
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy);
-
- if (right)
- memset(dst + left + copy, ref_row[w - 1], right);
-
- dst += dst_stride;
- ++y;
-
- if (y > 0 && y < h)
- ref_row += src_stride;
- } while (--b_h);
-}
-
-#if CONFIG_VP9_HIGHBITDEPTH
-static void high_build_mc_border(const uint8_t *src8, int src_stride,
- uint16_t *dst, int dst_stride,
- int x, int y, int b_w, int b_h,
- int w, int h) {
- // Get a pointer to the start of the real data for this row.
- const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
- const uint16_t *ref_row = src - x - y * src_stride;
-
- if (y >= h)
- ref_row += (h - 1) * src_stride;
- else if (y > 0)
- ref_row += y * src_stride;
-
- do {
- int right = 0, copy;
- int left = x < 0 ? -x : 0;
-
- if (left > b_w)
- left = b_w;
-
- if (x + b_w > w)
- right = x + b_w - w;
-
- if (right > b_w)
- right = b_w;
-
- copy = b_w - left - right;
-
- if (left)
- vpx_memset16(dst, ref_row[0], left);
-
- if (copy)
- memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
-
- if (right)
- vpx_memset16(dst + left + copy, ref_row[w - 1], right);
-
- dst += dst_stride;
- ++y;
-
- if (y > 0 && y < h)
- ref_row += src_stride;
- } while (--b_h);
-}
-#endif // CONFIG_VP9_HIGHBITDEPTH
-
-static void inter_predictor(const uint8_t *src, int src_stride,
+void inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
const int subpel_y,
kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}
-void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const MV *src_mv,
- const struct scale_factors *sf,
- int w, int h, int ref,
- const InterpKernel *kernel,
- enum mv_precision precision,
- int x, int y) {
- const int is_q4 = precision == MV_PRECISION_Q4;
- const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
- is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
- const int subpel_x = mv.col & SUBPEL_MASK;
- const int subpel_y = mv.row & SUBPEL_MASK;
-
- src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
-
- inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
- sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
-}
-
#if CONFIG_VP9_HIGHBITDEPTH
-static void high_inter_predictor(const uint8_t *src, int src_stride,
+void high_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const int subpel_x,
const int subpel_y,
}
#endif // CONFIG_VP9_HIGHBITDEPTH
+void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ enum mv_precision precision,
+ int x, int y) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+ is_q4 ? src_mv->col : src_mv->col * 2 };
+ MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+ inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
+}
+
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
return clamped_mv;
}
-static MV average_split_mvs(const struct macroblockd_plane *pd,
- const MODE_INFO *mi, int ref, int block) {
+MV average_split_mvs(const struct macroblockd_plane *pd,
+ const MODE_INFO *mi, int ref, int block) {
const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
MV res = {0, 0};
switch (ss_idx) {
return res;
}
-static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int bw, int bh,
int x, int y, int w, int h,
int mi_x, int mi_y) {
MAX_MB_PLANE - 1);
}
-// TODO(jingning): This function serves as a placeholder for decoder prediction
-// using on demand border extension. It should be moved to /decoder/ directory.
-static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
- int bw, int bh,
- int x, int y, int w, int h,
- int mi_x, int mi_y) {
- struct macroblockd_plane *const pd = &xd->plane[plane];
- const MODE_INFO *mi = xd->mi[0].src_mi;
- const int is_compound = has_second_ref(&mi->mbmi);
- const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
- int ref;
-
- for (ref = 0; ref < 1 + is_compound; ++ref) {
- const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
- struct buf_2d *const pre_buf = &pd->pre[ref];
- struct buf_2d *const dst_buf = &pd->dst;
- uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
- const MV mv = mi->mbmi.sb_type < BLOCK_8X8
- ? average_split_mvs(pd, mi, ref, block)
- : mi->mbmi.mv[ref].as_mv;
-
- const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
- pd->subsampling_x,
- pd->subsampling_y);
-
- MV32 scaled_mv;
- int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height, buf_stride,
- subpel_x, subpel_y;
- uint8_t *ref_frame, *buf_ptr;
- const YV12_BUFFER_CONFIG *ref_buf = xd->block_refs[ref]->buf;
- const int is_scaled = vp9_is_scaled(sf);
-
- // Get reference frame pointer, width and height.
- if (plane == 0) {
- frame_width = ref_buf->y_crop_width;
- frame_height = ref_buf->y_crop_height;
- ref_frame = ref_buf->y_buffer;
- } else {
- frame_width = ref_buf->uv_crop_width;
- frame_height = ref_buf->uv_crop_height;
- ref_frame = plane == 1 ? ref_buf->u_buffer : ref_buf->v_buffer;
- }
-
- if (is_scaled) {
- // Co-ordinate of containing block to pixel precision.
- int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
- int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
-
- // Co-ordinate of the block to 1/16th pixel precision.
- x0_16 = (x_start + x) << SUBPEL_BITS;
- y0_16 = (y_start + y) << SUBPEL_BITS;
-
- // Co-ordinate of current block in reference frame
- // to 1/16th pixel precision.
- x0_16 = sf->scale_value_x(x0_16, sf);
- y0_16 = sf->scale_value_y(y0_16, sf);
-
- // Map the top left corner of the block into the reference frame.
- x0 = sf->scale_value_x(x_start + x, sf);
- y0 = sf->scale_value_y(y_start + y, sf);
-
- // Scale the MV and incorporate the sub-pixel offset of the block
- // in the reference frame.
- scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
- xs = sf->x_step_q4;
- ys = sf->y_step_q4;
- } else {
- // Co-ordinate of containing block to pixel precision.
- x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
- y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
-
- // Co-ordinate of the block to 1/16th pixel precision.
- x0_16 = x0 << SUBPEL_BITS;
- y0_16 = y0 << SUBPEL_BITS;
-
- scaled_mv.row = mv_q4.row;
- scaled_mv.col = mv_q4.col;
- xs = ys = 16;
- }
- subpel_x = scaled_mv.col & SUBPEL_MASK;
- subpel_y = scaled_mv.row & SUBPEL_MASK;
-
- // Calculate the top left corner of the best matching block in the
- // reference frame.
- x0 += scaled_mv.col >> SUBPEL_BITS;
- y0 += scaled_mv.row >> SUBPEL_BITS;
- x0_16 += scaled_mv.col;
- y0_16 += scaled_mv.row;
-
- // Get reference block pointer.
- buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
- buf_stride = pre_buf->stride;
-
- // Do border extension if there is motion or the
- // width/height is not a multiple of 8 pixels.
- if (is_scaled || scaled_mv.col || scaled_mv.row ||
- (frame_width & 0x7) || (frame_height & 0x7)) {
- // Get reference block bottom right coordinate.
- int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
- int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
- int x_pad = 0, y_pad = 0;
-
- if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
- x0 -= VP9_INTERP_EXTEND - 1;
- x1 += VP9_INTERP_EXTEND;
- x_pad = 1;
- }
-
- if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
- y0 -= VP9_INTERP_EXTEND - 1;
- y1 += VP9_INTERP_EXTEND;
- y_pad = 1;
- }
-
- // Skip border extension if block is inside the frame.
- if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
- y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
- uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
- // Extend the border.
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- high_build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf_high,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
- y_pad * 3 * buf_stride + x_pad * 3;
- } else {
- build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
- }
-#else
- build_mc_border(buf_ptr1,
- pre_buf->stride,
- xd->mc_buf,
- x1 - x0 + 1,
- x0,
- y0,
- x1 - x0 + 1,
- y1 - y0 + 1,
- frame_width,
- frame_height);
- buf_stride = x1 - x0 + 1;
- buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
-#endif // CONFIG_VP9_HIGHBITDEPTH
- }
- }
-
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
- } else {
- inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
- }
-#else
- inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
- subpel_y, sf, w, h, ref, kernel, xs, ys);
-#endif // CONFIG_VP9_HIGHBITDEPTH
- }
-}
-
-void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize) {
- int plane;
- const int mi_x = mi_col * MI_SIZE;
- const int mi_y = mi_row * MI_SIZE;
- for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
- const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
- &xd->plane[plane]);
- const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
- const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
- const int bw = 4 * num_4x4_w;
- const int bh = 4 * num_4x4_h;
-
- if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
- int i = 0, x, y;
- assert(bsize == BLOCK_8X8);
- for (y = 0; y < num_4x4_h; ++y)
- for (x = 0; x < num_4x4_w; ++x)
- dec_build_inter_predictors(xd, plane, i++, bw, bh,
- 4 * x, 4 * y, 4, 4, mi_x, mi_y);
- } else {
- dec_build_inter_predictors(xd, plane, 0, bw, bh,
- 0, 0, bw, bh, mi_x, mi_y);
- }
- }
-}
-
void vp9_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
extern "C" {
#endif
+void inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void high_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys, int bd);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
+ int ref, int block);
+
+MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
+ int bw, int bh, int ss_x, int ss_y);
+
+void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+ int bw, int bh,
+ int x, int y, int w, int h,
+ int mi_x, int mi_y);
+
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
-void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
- BLOCK_SIZE bsize);
-
void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
uint8_t *dst, int dst_stride,
const MV *mv_q3,
void vp9_rtcd() {
vpx_scale_rtcd();
+ // TODO(JBB): Remove this call to once(), by ensuring that both the encoder
+ // and decoder setup functions are protected by once().
once(setup_rtcd_internal);
}
$vp9_lpf_vertical_16_dual_neon_asm=vp9_lpf_vertical_16_dual_neon;
add_proto qw/void vp9_lpf_vertical_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_vertical_8 sse2 neon dspr2/;
+specialize qw/vp9_lpf_vertical_8 sse2 neon_asm dspr2/;
+$vp9_lpf_vertical_8_neon_asm=vp9_lpf_vertical_8_neon;
add_proto qw/void vp9_lpf_vertical_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vp9_lpf_vertical_8_dual sse2 neon dspr2/;
+specialize qw/vp9_lpf_vertical_8_dual sse2 neon_asm dspr2/;
+$vp9_lpf_vertical_8_dual_neon_asm=vp9_lpf_vertical_8_dual_neon;
add_proto qw/void vp9_lpf_vertical_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
specialize qw/vp9_lpf_vertical_4 mmx neon dspr2/;
$vp9_lpf_horizontal_16_neon_asm=vp9_lpf_horizontal_16_neon;
add_proto qw/void vp9_lpf_horizontal_8/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
-specialize qw/vp9_lpf_horizontal_8 sse2 neon dspr2/;
+specialize qw/vp9_lpf_horizontal_8 sse2 neon_asm dspr2/;
+$vp9_lpf_horizontal_8_neon_asm=vp9_lpf_horizontal_8_neon;
add_proto qw/void vp9_lpf_horizontal_8_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
-specialize qw/vp9_lpf_horizontal_8_dual sse2 neon dspr2/;
+specialize qw/vp9_lpf_horizontal_8_dual sse2 neon_asm dspr2/;
+$vp9_lpf_horizontal_8_dual_neon_asm=vp9_lpf_horizontal_8_dual_neon;
add_proto qw/void vp9_lpf_horizontal_4/, "uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count";
specialize qw/vp9_lpf_horizontal_4 mmx neon dspr2/;
add_proto qw/void vp9_plane_add_noise/, "uint8_t *Start, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int Width, unsigned int Height, int Pitch";
specialize qw/vp9_plane_add_noise sse2/;
$vp9_plane_add_noise_sse2=vp9_plane_add_noise_wmt;
+
+add_proto qw/void vp9_filter_by_weight16x16/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int src_weight";
+specialize qw/vp9_filter_by_weight16x16 sse2/;
+
+add_proto qw/void vp9_filter_by_weight8x8/, "const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int src_weight";
+specialize qw/vp9_filter_by_weight8x8 sse2/;
}
#
specialize qw/vp9_variance16x32/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_variance64x32 avx2/, "$sse2_x86inc";
+specialize qw/vp9_variance64x32 avx2 neon/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_variance32x64/, "$sse2_x86inc";
+specialize qw/vp9_variance32x64 neon/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance32x32 avx2 neon/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_variance64x64 avx2/, "$sse2_x86inc";
+specialize qw/vp9_variance64x64 avx2 neon/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance16x16 avx2 neon/, "$sse2_x86inc";
specialize qw/vp9_variance4x4/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
-specialize qw/vp9_sub_pixel_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
+specialize qw/vp9_sub_pixel_variance64x64 avx2 neon/, "$sse2_x86inc", "$ssse3_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_avg_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, const uint8_t *second_pred";
specialize qw/vp9_sub_pixel_avg_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
specialize qw/vp9_sad4x4x8 sse4/;
add_proto qw/void vp9_sad64x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
-specialize qw/vp9_sad64x64x4d sse2 avx2/;
+specialize qw/vp9_sad64x64x4d sse2 avx2 neon/;
add_proto qw/void vp9_sad32x64x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
specialize qw/vp9_sad32x64x4d sse2/;
specialize qw/vp9_sad16x32x4d sse2/;
add_proto qw/void vp9_sad32x32x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
-specialize qw/vp9_sad32x32x4d sse2 avx2/;
+specialize qw/vp9_sad32x32x4d sse2 avx2 neon/;
add_proto qw/void vp9_sad16x16x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
-specialize qw/vp9_sad16x16x4d sse2/;
+specialize qw/vp9_sad16x16x4d sse2 neon/;
add_proto qw/void vp9_sad16x8x4d/, "const uint8_t *src_ptr, int src_stride, const uint8_t* const ref_ptr[], int ref_stride, unsigned int *sad_array";
specialize qw/vp9_sad16x8x4d sse2/;
specialize qw/vp9_get_mb_ss/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_avg_8x8/, "const uint8_t *, int p";
-specialize qw/vp9_avg_8x8 sse2/;
+specialize qw/vp9_avg_8x8 sse2 neon/;
add_proto qw/unsigned int vp9_avg_4x4/, "const uint8_t *, int p";
specialize qw/vp9_avg_4x4 sse2/;
add_proto qw/int64_t vp9_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
specialize qw/vp9_block_error/;
- add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_fp/;
- add_proto qw/void vp9_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_fp_32x32/;
- add_proto qw/void vp9_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b/;
- add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b_32x32/;
- add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_fdct8x8_quant/;
} else {
add_proto qw/int64_t vp9_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
specialize qw/vp9_block_error avx2/, "$sse2_x86inc";
- add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_fp neon sse2/, "$ssse3_x86_64";
- add_proto qw/void vp9_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_fp_32x32/, "$ssse3_x86_64";
- add_proto qw/void vp9_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b sse2/, "$ssse3_x86_64";
- add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_quantize_b_32x32/, "$ssse3_x86_64";
- add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
- specialize qw/vp9_fdct8x8_quant sse2 ssse3/;
+ add_proto qw/void vp9_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ specialize qw/vp9_fdct8x8_quant sse2 ssse3 neon/;
}
#
add_proto qw/void vp9_highbd_subtract_block/, "int rows, int cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride, int bd";
specialize qw/vp9_highbd_subtract_block/;
- add_proto qw/void vp9_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_highbd_quantize_fp/;
- add_proto qw/void vp9_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_highbd_quantize_fp_32x32/;
- add_proto qw/void vp9_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_highbd_quantize_b sse2/;
- add_proto qw/void vp9_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, int zbin_oq_value, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+ add_proto qw/void vp9_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
specialize qw/vp9_highbd_quantize_b_32x32 sse2/;
#
vp9_tile_set_col(tile, cm, col);
}
-void vp9_get_tile_n_bits(int mi_cols,
- int *min_log2_tile_cols, int *max_log2_tile_cols) {
- const int sb_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
- int min_log2 = 0, max_log2 = 0;
-
- // max
- while ((sb_cols >> max_log2) >= MIN_TILE_WIDTH_B64)
- ++max_log2;
- --max_log2;
- if (max_log2 < 0)
- max_log2 = 0;
-
- // min
- while ((MAX_TILE_WIDTH_B64 << min_log2) < sb_cols)
+static int get_min_log2_tile_cols(const int sb64_cols) {
+ int min_log2 = 0;
+ while ((MAX_TILE_WIDTH_B64 << min_log2) < sb64_cols)
++min_log2;
+ return min_log2;
+}
- assert(min_log2 <= max_log2);
+static int get_max_log2_tile_cols(const int sb64_cols) {
+ int max_log2 = 1;
+ while ((sb64_cols >> max_log2) >= MIN_TILE_WIDTH_B64)
+ ++max_log2;
+ return max_log2 - 1;
+}
- *min_log2_tile_cols = min_log2;
- *max_log2_tile_cols = max_log2;
+void vp9_get_tile_n_bits(int mi_cols,
+ int *min_log2_tile_cols, int *max_log2_tile_cols) {
+ const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
+ *min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols);
+ *max_log2_tile_cols = get_max_log2_tile_cols(sb64_cols);
+ assert(*min_log2_tile_cols <= *max_log2_tile_cols);
}
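+// For example (assuming MIN_TILE_WIDTH_B64 == 4 and MAX_TILE_WIDTH_B64 == 64),
+// a 1920-wide frame has 240 mi_cols, i.e. 30 sb64 columns, which yields
+// min_log2_tile_cols == 0 and max_log2_tile_cols == 2 (at most 4 tile columns).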
--- /dev/null
+;
+; Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+; This file is a duplicate of mfqe_sse2.asm in VP8.
+; TODO(jackychen): Find a way to fix the duplicate.
+%include "vpx_ports/x86_abi_support.asm"
+
+;void vp9_filter_by_weight16x16_sse2
+;(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride,
+; int src_weight
+;)
+global sym(vp9_filter_by_weight16x16_sse2) PRIVATE
+sym(vp9_filter_by_weight16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ SAVE_XMM 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movd xmm0, arg(4) ; src_weight
+ pshuflw xmm0, xmm0, 0x0 ; replicate to all low words
+ punpcklqdq xmm0, xmm0 ; replicate to all hi words
+
+ movdqa xmm1, [GLOBAL(tMFQE)]
+ psubw xmm1, xmm0 ; dst_weight
+
+ mov rax, arg(0) ; src
+ mov rsi, arg(1) ; src_stride
+ mov rdx, arg(2) ; dst
+ mov rdi, arg(3) ; dst_stride
+
+ mov rcx, 16 ; loop count
+ pxor xmm6, xmm6
+
+.combine
+ movdqa xmm2, [rax]
+ movdqa xmm4, [rdx]
+ add rax, rsi
+
+ ; src * src_weight
+ movdqa xmm3, xmm2
+ punpcklbw xmm2, xmm6
+ punpckhbw xmm3, xmm6
+ pmullw xmm2, xmm0
+ pmullw xmm3, xmm0
+
+ ; dst * dst_weight
+ movdqa xmm5, xmm4
+ punpcklbw xmm4, xmm6
+ punpckhbw xmm5, xmm6
+ pmullw xmm4, xmm1
+ pmullw xmm5, xmm1
+
+ ; sum, round and shift
+ paddw xmm2, xmm4
+ paddw xmm3, xmm5
+ paddw xmm2, [GLOBAL(tMFQE_round)]
+ paddw xmm3, [GLOBAL(tMFQE_round)]
+ psrlw xmm2, 4
+ psrlw xmm3, 4
+
+ packuswb xmm2, xmm3
+ movdqa [rdx], xmm2
+ add rdx, rdi
+
+ dec rcx
+ jnz .combine
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ RESTORE_XMM
+ UNSHADOW_ARGS
+ pop rbp
+
+ ret
+
+;void vp9_filter_by_weight8x8_sse2
+;(
+; unsigned char *src,
+; int src_stride,
+; unsigned char *dst,
+; int dst_stride,
+; int src_weight
+;)
+global sym(vp9_filter_by_weight8x8_sse2) PRIVATE
+sym(vp9_filter_by_weight8x8_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 5
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ movd xmm0, arg(4) ; src_weight
+ pshuflw xmm0, xmm0, 0x0 ; replicate to all low words
+ punpcklqdq xmm0, xmm0 ; replicate to all hi words
+
+ movdqa xmm1, [GLOBAL(tMFQE)]
+ psubw xmm1, xmm0 ; dst_weight
+
+ mov rax, arg(0) ; src
+ mov rsi, arg(1) ; src_stride
+ mov rdx, arg(2) ; dst
+ mov rdi, arg(3) ; dst_stride
+
+ mov rcx, 8 ; loop count
+ pxor xmm4, xmm4
+
+.combine
+ movq xmm2, [rax]
+ movq xmm3, [rdx]
+ add rax, rsi
+
+ ; src * src_weight
+ punpcklbw xmm2, xmm4
+ pmullw xmm2, xmm0
+
+ ; dst * dst_weight
+ punpcklbw xmm3, xmm4
+ pmullw xmm3, xmm1
+
+ ; sum, round and shift
+ paddw xmm2, xmm3
+ paddw xmm2, [GLOBAL(tMFQE_round)]
+ psrlw xmm2, 4
+
+ packuswb xmm2, xmm4
+ movq [rdx], xmm2
+ add rdx, rdi
+
+ dec rcx
+ jnz .combine
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+
+ ret
+
+;void vp9_variance_and_sad_16x16_sse2 | arg
+;(
+; unsigned char *src1, 0
+; int stride1, 1
+; unsigned char *src2, 2
+; int stride2, 3
+; unsigned int *variance, 4
+; unsigned int *sad, 5
+;)
+global sym(vp9_variance_and_sad_16x16_sse2) PRIVATE
+sym(vp9_variance_and_sad_16x16_sse2):
+ push rbp
+ mov rbp, rsp
+ SHADOW_ARGS_TO_STACK 6
+ GET_GOT rbx
+ push rsi
+ push rdi
+ ; end prolog
+
+ mov rax, arg(0) ; src1
+ mov rcx, arg(1) ; stride1
+ mov rdx, arg(2) ; src2
+ mov rdi, arg(3) ; stride2
+
+ mov rsi, 16 ; block height
+
+ ; Prep accumulator registers
+ pxor xmm3, xmm3 ; SAD
+ pxor xmm4, xmm4 ; sum of src2
+ pxor xmm5, xmm5 ; sum of src2^2
+
+ ; Because we're working with the actual output frames
+ ; we can't depend on any kind of data alignment.
+.accumulate
+ movdqa xmm0, [rax] ; src1
+ movdqa xmm1, [rdx] ; src2
+ add rax, rcx ; src1 + stride1
+ add rdx, rdi ; src2 + stride2
+
+ ; SAD(src1, src2)
+ psadbw xmm0, xmm1
+ paddusw xmm3, xmm0
+
+ ; SUM(src2)
+ pxor xmm2, xmm2
+ psadbw xmm2, xmm1 ; sum src2 by misusing SAD against 0
+ paddusw xmm4, xmm2
+
+ ; pmaddubsw would be ideal if it took two unsigned values. Instead,
+ ; it expects a signed and an unsigned value, so we zero extend
+ ; and operate on words.
+ pxor xmm2, xmm2
+ movdqa xmm0, xmm1
+ punpcklbw xmm0, xmm2
+ punpckhbw xmm1, xmm2
+ pmaddwd xmm0, xmm0
+ pmaddwd xmm1, xmm1
+ paddd xmm5, xmm0
+ paddd xmm5, xmm1
+
+ sub rsi, 1
+ jnz .accumulate
+
+ ; phaddd only operates on adjacent double words, so shift and add instead.
+ ; Finalize SAD and store
+ movdqa xmm0, xmm3
+ psrldq xmm0, 8
+ paddusw xmm0, xmm3
+ paddd xmm0, [GLOBAL(t128)]
+ psrld xmm0, 8
+
+ mov rax, arg(5)
+ movd [rax], xmm0
+
+ ; Accumulate sum of src2
+ movdqa xmm0, xmm4
+ psrldq xmm0, 8
+ paddusw xmm0, xmm4
+ ; Square the sum of src2. Ignore the high value
+ pmuludq xmm0, xmm0
+ psrld xmm0, 8
+
+ ; phaddw could be used to sum adjacent values, but we want
+ ; all the values summed. Promote to doublewords, accumulate,
+ ; shift and sum.
+ pxor xmm2, xmm2
+ movdqa xmm1, xmm5
+ punpckldq xmm1, xmm2
+ punpckhdq xmm5, xmm2
+ paddd xmm1, xmm5
+ movdqa xmm2, xmm1
+ psrldq xmm1, 8
+ paddd xmm1, xmm2
+
+ psubd xmm1, xmm0
+
+ ; (variance + 128) >> 8
+ paddd xmm1, [GLOBAL(t128)]
+ psrld xmm1, 8
+ mov rax, arg(4)
+
+ movd [rax], xmm1
+
+
+ ; begin epilog
+ pop rdi
+ pop rsi
+ RESTORE_GOT
+ UNSHADOW_ARGS
+ pop rbp
+ ret
+
+SECTION_RODATA
+align 16
+t128:
+%ifndef __NASM_VER__
+ ddq 128
+%elif CONFIG_BIG_ENDIAN
+ dq 0, 128
+%else
+ dq 128, 0
+%endif
+align 16
+tMFQE: ; 1 << MFQE_PRECISION
+ times 8 dw 0x10
+align 16
+tMFQE_round: ; 1 << (MFQE_PRECISION - 1)
+ times 8 dw 0x08
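
For readers of the assembly above, here is a minimal C sketch of the same arithmetic (illustrative names, not the patch's API): each output pixel becomes (src * w + dst * (16 - w) + 8) >> 4, matching tMFQE = 16 and tMFQE_round = 8. The variance-and-SAD routine likewise returns (sum|src1 - src2| + 128) >> 8 as the SAD output and (sum(src2^2) - sum(src2)^2 / 256 + 128) >> 8 as the variance output for the 16x16 block.

#include <stdint.h>

/* Minimal sketch of the weighting kernel: blend src into dst with 4-bit
 * fixed-point weights (w in [0, 16]), the same arithmetic the SSE2 code
 * performs 8 or 16 pixels at a time. */
void filter_by_weight_sketch(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             int block_size, int src_weight) {
  const int dst_weight = 16 - src_weight;
  int r, c;

  for (r = 0; r < block_size; ++r) {
    for (c = 0; c < block_size; ++c)
      dst[c] = (uint8_t)((src[c] * src_weight + dst[c] * dst_weight + 8) >> 4);
    src += src_stride;
    dst += dst_stride;
  }
}
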
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
+#include "vp9/common/vp9_loopfilter_thread.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_dsubexp.h"
-#include "vp9/decoder/vp9_dthread.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_reader.h"
return &xd->mi[0].mbmi;
}
-static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col,
vp9_reader *r, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &pbi->common;
const int less8x8 = bsize < BLOCK_8X8;
MB_MODE_INFO *mbmi = set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
- vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);
+ vp9_read_mode_info(pbi, xd, tile, mi_row, mi_col, r);
if (less8x8)
bsize = BLOCK_8X8;
predict_and_reconstruct_intra_block, &arg);
} else {
// Prediction
- vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);
// Reconstruction
if (!mbmi->skip) {
return p;
}
-static void decode_partition(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col,
vp9_reader* r, BLOCK_SIZE bsize) {
+ VP9_COMMON *const cm = &pbi->common;
const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
PARTITION_TYPE partition;
BLOCK_SIZE subsize, uv_subsize;
subsize = get_subsize(bsize, partition);
uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y];
if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid block size.");
+ vpx_internal_error(xd->error_info,
+ VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
if (subsize < BLOCK_8X8) {
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
} else {
switch (partition) {
case PARTITION_NONE:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
break;
case PARTITION_HORZ:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
if (mi_row + hbs < cm->mi_rows)
- decode_block(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row + hbs, mi_col, r, subsize);
break;
case PARTITION_VERT:
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
if (mi_col + hbs < cm->mi_cols)
- decode_block(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col + hbs, r, subsize);
break;
case PARTITION_SPLIT:
- decode_partition(cm, xd, tile, mi_row, mi_col, r, subsize);
- decode_partition(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
- decode_partition(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
- decode_partition(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
+ decode_partition(pbi, xd, tile, mi_row, mi_col, r, subsize);
+ decode_partition(pbi, xd, tile, mi_row, mi_col + hbs, r, subsize);
+ decode_partition(pbi, xd, tile, mi_row + hbs, mi_col, r, subsize);
+ decode_partition(pbi, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
break;
default:
assert(0 && "Invalid partition type");
static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
int width, height;
+ BufferPool *const pool = cm->buffer_pool;
vp9_read_frame_size(rb, &width, &height);
resize_context_buffers(cm, width, height);
setup_display_size(cm, rb);
+ lock_buffer_pool(pool);
if (vp9_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#endif
VP9_DEC_BORDER_IN_PIXELS,
cm->byte_alignment,
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
- cm->cb_priv)) {
+ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
+ pool->cb_priv)) {
+ unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
- cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
+ unlock_buffer_pool(pool);
+
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
+ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
}
static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
int width, height;
int found = 0, i;
int has_valid_ref_frame = 0;
+ BufferPool *const pool = cm->buffer_pool;
for (i = 0; i < REFS_PER_FRAME; ++i) {
if (vp9_rb_read_bit(rb)) {
YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
cm->subsampling_x,
cm->subsampling_y))
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Referenced frame has incompatible color space");
+ "Referenced frame has incompatible color format");
}
resize_context_buffers(cm, width, height);
setup_display_size(cm, rb);
+ lock_buffer_pool(pool);
if (vp9_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#endif
VP9_DEC_BORDER_IN_PIXELS,
cm->byte_alignment,
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
- cm->cb_priv)) {
+ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
+ pool->cb_priv)) {
+ unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
- cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
+ unlock_buffer_pool(pool);
+
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
+ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
+ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
}
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
vp9_zero(tile_data->xd.left_seg_context);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col,
+ decode_partition(pbi, &tile_data->xd, &tile, mi_row, mi_col,
&tile_data->bit_reader, BLOCK_64X64);
}
pbi->mb.corrupted |= tile_data->xd.corrupted;
winterface->execute(&pbi->lf_worker);
}
}
+ // After loopfiltering, the last 7 rows of pixels in each superblock row
+ // may still be changed by the longest loopfilter of the next superblock
+ // row.
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_broadcast(pbi->cur_buf,
+ mi_row << MI_BLOCK_SIZE_LOG2);
}
}
// Get last tile data.
tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX);
return vp9_reader_find_end(&tile_data->bit_reader);
}
const TileInfo *const tile) {
int mi_row, mi_col;
+ if (setjmp(tile_data->error_info.jmp)) {
+ tile_data->error_info.setjmp = 0;
+ tile_data->xd.corrupted = 1;
+ return 0;
+ }
+
+ tile_data->error_info.setjmp = 1;
+ tile_data->xd.error_info = &tile_data->error_info;
+
for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
mi_row += MI_BLOCK_SIZE) {
vp9_zero(tile_data->xd.left_context);
vp9_zero(tile_data->xd.left_seg_context);
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_partition(tile_data->cm, &tile_data->xd, tile,
+ decode_partition(tile_data->pbi, &tile_data->xd, tile,
mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
}
}
TileInfo *const tile = (TileInfo*)worker->data2;
TileBuffer *const buf = &tile_buffers[0][n];
- tile_data->cm = cm;
+ tile_data->pbi = pbi;
tile_data->xd = pbi->mb;
tile_data->xd.corrupted = 0;
- vp9_tile_init(tile, tile_data->cm, 0, buf->col);
+ vp9_tile_init(tile, &pbi->common, 0, buf->col);
setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
&tile_data->bit_reader, pbi->decrypt_cb,
pbi->decrypt_state);
for (; i > 0; --i) {
VP9Worker *const worker = &pbi->tile_workers[i - 1];
+ // TODO(jzern): The tile may have specific error data associated with
+ // its vpx_internal_error_info which could be propagated to the main info
+ // in cm. Additionally once the threads have been synced and an error is
+ // detected, there's no point in continuing to decode tiles.
pbi->mb.corrupted |= !winterface->sync(worker);
}
if (final_worker > -1) {
cm->use_highbitdepth = 0;
#endif
}
- cm->color_space = (COLOR_SPACE)vp9_rb_read_literal(rb, 3);
- if (cm->color_space != SRGB) {
+ cm->color_space = vp9_rb_read_literal(rb, 3);
+ if (cm->color_space != VPX_CS_SRGB) {
vp9_rb_read_bit(rb); // [16,235] (including xvycc) vs [0,255] range
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
cm->subsampling_x = vp9_rb_read_bit(rb);
static size_t read_uncompressed_header(VP9Decoder *pbi,
struct vp9_read_bit_buffer *rb) {
VP9_COMMON *const cm = &pbi->common;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ BufferPool *const pool = pbi->common.buffer_pool;
+ int i, mask, ref_index = 0;
size_t sz;
- int i;
cm->last_frame_type = cm->frame_type;
if (cm->show_existing_frame) {
// Show an existing frame directly.
const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
-
- if (frame_to_show < 0 || cm->frame_bufs[frame_to_show].ref_count < 1)
+ lock_buffer_pool(pool);
+ if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
+ unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a decoded frame",
frame_to_show);
+ }
- ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show);
+ ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
+ unlock_buffer_pool(pool);
pbi->refresh_frame_flags = 0;
cm->lf.filter_level = 0;
cm->show_frame = 1;
+
+ if (pbi->frame_parallel_decode) {
+ for (i = 0; i < REF_FRAMES; ++i)
+ cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
+ }
return 0;
}
}
setup_frame_size(cm, rb);
- pbi->need_resync = 0;
+ if (pbi->need_resync) {
+ vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ pbi->need_resync = 0;
+ }
} else {
cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);
} else {
// NOTE: The intra-only frame header does not include the specification
// of either the color format or color sub-sampling in profile 0. VP9
- // specifies that the default color space should be YUV 4:2:0 in this
+ // specifies that the default color format should be YUV 4:2:0 in this
// case (normative).
- cm->color_space = BT_601;
+ cm->color_space = VPX_CS_BT_601;
cm->subsampling_y = cm->subsampling_x = 1;
cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
setup_frame_size(cm, rb);
- pbi->need_resync = 0;
- } else {
+ if (pbi->need_resync) {
+ vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ pbi->need_resync = 0;
+ }
+ } else if (pbi->need_resync != 1) { /* Skip if need resync */
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
for (i = 0; i < REFS_PER_FRAME; ++i) {
const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
const int idx = cm->ref_frame_map[ref];
RefBuffer *const ref_frame = &cm->frame_refs[i];
ref_frame->idx = idx;
- ref_frame->buf = &cm->frame_bufs[idx].buf;
+ ref_frame->buf = &frame_bufs[idx].buf;
cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
}
// below, forcing the use of context 0 for those frame types.
cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+ // Generate next_ref_frame_map.
+ lock_buffer_pool(pool);
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ if (mask & 1) {
+ cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
+ ++frame_bufs[cm->new_fb_idx].ref_count;
+ } else {
+ cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
+ }
+ // Current thread holds the reference frame.
+ if (cm->ref_frame_map[ref_index] >= 0)
+ ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
+ ++ref_index;
+ }
+
+ for (; ref_index < REF_FRAMES; ++ref_index) {
+ cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
+ // Current thread holds the reference frame.
+ if (cm->ref_frame_map[ref_index] >= 0)
+ ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
+ }
+ unlock_buffer_pool(pool);
+ pbi->hold_ref_buf = 1;
+
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
vp9_setup_past_independence(cm);
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0};
-
+ int context_updated = 0;
uint8_t clear_data[MAX_VP9_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(pbi,
init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt header length");
- init_macroblockd(cm, &pbi->mb);
-
cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
cm->width == cm->last_width &&
cm->height == cm->last_height &&
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data header is corrupted.");
+ if (cm->lf.filter_level) {
+ vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
+ }
+
+ // If encoded in frame parallel mode, frame context is ready after decoding
+ // the frame header.
+ if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ FrameWorkerData *const frame_worker_data = worker->data1;
+ if (cm->refresh_frame_context) {
+ context_updated = 1;
+ cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+ }
+ vp9_frameworker_lock_stats(worker);
+ pbi->cur_buf->row = -1;
+ pbi->cur_buf->col = -1;
+ frame_worker_data->frame_context_ready = 1;
+ // Signal the main thread that context is ready.
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ }
+
// TODO(jzern): remove frame_parallel_decoding_mode restriction for
// single-frame tile decoding.
if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
if (!xd->corrupted) {
// If multiple threads are used to decode tiles, then we use those threads
// to do parallel loopfiltering.
- vp9_loop_filter_frame_mt(&pbi->lf_row_sync, new_fb, pbi->mb.plane, cm,
- pbi->tile_workers, pbi->num_tile_workers,
- cm->lf.filter_level, 0);
+ vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
+ 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+ &pbi->lf_row_sync);
} else {
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
*p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
}
- new_fb->corrupted |= xd->corrupted;
-
- if (!new_fb->corrupted) {
+ if (!xd->corrupted) {
if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
vp9_adapt_coef_probs(cm);
"Decode failed. Frame data is corrupted.");
}
- if (cm->refresh_frame_context)
+ // In non-frame-parallel decode, update the frame context here.
+ if (cm->refresh_frame_context && !context_updated)
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
+
+static void build_mc_border(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ int x, int y, int b_w, int b_h, int w, int h) {
+ // Get a pointer to the start of the real data for this row.
+ const uint8_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w)
+ left = b_w;
+
+ if (x + b_w > w)
+ right = x + b_w - w;
+
+ if (right > b_w)
+ right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left)
+ memset(dst, ref_row[0], left);
+
+ if (copy)
+ memcpy(dst + left, ref_row + x + left, copy);
+
+ if (right)
+ memset(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h)
+ ref_row += src_stride;
+ } while (--b_h);
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static void high_build_mc_border(const uint8_t *src8, int src_stride,
+ uint16_t *dst, int dst_stride,
+ int x, int y, int b_w, int b_h,
+ int w, int h) {
+ // Get a pointer to the start of the real data for this row.
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w)
+ left = b_w;
+
+ if (x + b_w > w)
+ right = x + b_w - w;
+
+ if (right > b_w)
+ right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left)
+ vpx_memset16(dst, ref_row[0], left);
+
+ if (copy)
+ memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
+
+ if (right)
+ vpx_memset16(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h)
+ ref_row += src_stride;
+ } while (--b_h);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
+ int plane, int block, int bw, int bh, int x,
+ int y, int w, int h, int mi_x, int mi_y) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
+ const MODE_INFO *mi = xd->mi[0].src_mi;
+ const int is_compound = has_second_ref(&mi->mbmi);
+ const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
+ int ref;
+
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ struct buf_2d *const dst_buf = &pd->dst;
+ uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
+ const MV mv = mi->mbmi.sb_type < BLOCK_8X8
+ ? average_split_mvs(pd, mi, ref, block)
+ : mi->mbmi.mv[ref].as_mv;
+
+ const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
+ pd->subsampling_x,
+ pd->subsampling_y);
+
+ MV32 scaled_mv;
+ int xs, ys, x0, y0, x0_16, y0_16, y1, frame_width, frame_height,
+ buf_stride, subpel_x, subpel_y;
+ uint8_t *ref_frame, *buf_ptr;
+ const int idx = xd->block_refs[ref]->idx;
+ BufferPool *const pool = pbi->common.buffer_pool;
+ RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
+ const int is_scaled = vp9_is_scaled(sf);
+
+ // Get reference frame pointer, width and height.
+ if (plane == 0) {
+ frame_width = ref_frame_buf->buf.y_crop_width;
+ frame_height = ref_frame_buf->buf.y_crop_height;
+ ref_frame = ref_frame_buf->buf.y_buffer;
+ } else {
+ frame_width = ref_frame_buf->buf.uv_crop_width;
+ frame_height = ref_frame_buf->buf.uv_crop_height;
+ ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
+ : ref_frame_buf->buf.v_buffer;
+ }
+
+ if (is_scaled) {
+ // Co-ordinate of containing block to pixel precision.
+ int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
+ int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
+
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = (x_start + x) << SUBPEL_BITS;
+ y0_16 = (y_start + y) << SUBPEL_BITS;
+
+ // Co-ordinate of current block in reference frame
+ // to 1/16th pixel precision.
+ x0_16 = sf->scale_value_x(x0_16, sf);
+ y0_16 = sf->scale_value_y(y0_16, sf);
+
+ // Map the top left corner of the block into the reference frame.
+ x0 = sf->scale_value_x(x_start + x, sf);
+ y0 = sf->scale_value_y(y_start + y, sf);
+
+ // Scale the MV and incorporate the sub-pixel offset of the block
+ // in the reference frame.
+ scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ xs = sf->x_step_q4;
+ ys = sf->y_step_q4;
+ } else {
+ // Co-ordinate of containing block to pixel precision.
+ x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
+ y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
+
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = x0 << SUBPEL_BITS;
+ y0_16 = y0 << SUBPEL_BITS;
+
+ scaled_mv.row = mv_q4.row;
+ scaled_mv.col = mv_q4.col;
+ xs = ys = 16;
+ }
+ subpel_x = scaled_mv.col & SUBPEL_MASK;
+ subpel_y = scaled_mv.row & SUBPEL_MASK;
+
+ // Calculate the top left corner of the best matching block in the
+ // reference frame.
+ x0 += scaled_mv.col >> SUBPEL_BITS;
+ y0 += scaled_mv.row >> SUBPEL_BITS;
+ x0_16 += scaled_mv.col;
+ y0_16 += scaled_mv.row;
+
+ // Get reference block pointer.
+ buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
+ buf_stride = pre_buf->stride;
+
+ // Get reference block bottom right vertical coordinate.
+ y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
+
+ // Do border extension if there is motion or the
+ // width/height is not a multiple of 8 pixels.
+ if (is_scaled || scaled_mv.col || scaled_mv.row ||
+ (frame_width & 0x7) || (frame_height & 0x7)) {
+ // Get reference block bottom right horizontal coordinate.
+ int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
+ int x_pad = 0, y_pad = 0;
+
+ if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
+ x0 -= VP9_INTERP_EXTEND - 1;
+ x1 += VP9_INTERP_EXTEND;
+ x_pad = 1;
+ }
+
+ if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
+ y0 -= VP9_INTERP_EXTEND - 1;
+ y1 += VP9_INTERP_EXTEND;
+ y_pad = 1;
+ }
+
+ // Wait until the reference block is ready. Pad 7 more rows, as the last
+ // 7 pixel rows of each superblock row can be changed by the next
+ // superblock row.
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ (y1 + 7) << (plane == 0 ? 0 : 1));
+
+ // Skip border extension if block is inside the frame.
+ if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
+ y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
+ uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
+ // Extend the border.
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ high_build_mc_border(buf_ptr1,
+ pre_buf->stride,
+ xd->mc_buf_high,
+ x1 - x0 + 1,
+ x0,
+ y0,
+ x1 - x0 + 1,
+ y1 - y0 + 1,
+ frame_width,
+ frame_height);
+ buf_stride = x1 - x0 + 1;
+ buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
+ y_pad * 3 * buf_stride + x_pad * 3;
+ } else {
+ build_mc_border(buf_ptr1,
+ pre_buf->stride,
+ xd->mc_buf,
+ x1 - x0 + 1,
+ x0,
+ y0,
+ x1 - x0 + 1,
+ y1 - y0 + 1,
+ frame_width,
+ frame_height);
+ buf_stride = x1 - x0 + 1;
+ buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+ }
+#else
+ build_mc_border(buf_ptr1,
+ pre_buf->stride,
+ xd->mc_buf,
+ x1 - x0 + 1,
+ x0,
+ y0,
+ x1 - x0 + 1,
+ y1 - y0 + 1,
+ frame_width,
+ frame_height);
+ buf_stride = x1 - x0 + 1;
+ buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ } else {
+ // Wait until the reference block is ready. Pad 7 more rows, as the last
+ // 7 pixel rows of each superblock row can be changed by the next
+ // superblock row.
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ (y1 + 7) << (plane == 0 ? 0 : 1));
+ }
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
+ } else {
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
+ }
+#else
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+}
+
+void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ int plane;
+ const int mi_x = mi_col * MI_SIZE;
+ const int mi_y = mi_row * MI_SIZE;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
+ &xd->plane[plane]);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int bw = 4 * num_4x4_w;
+ const int bh = 4 * num_4x4_h;
+
+ if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
+ int i = 0, x, y;
+ assert(bsize == BLOCK_8X8);
+ for (y = 0; y < num_4x4_h; ++y)
+ for (x = 0; x < num_4x4_w; ++x)
+ dec_build_inter_predictors(pbi, xd, plane, i++, bw, bh,
+ 4 * x, 4 * y, 4, 4, mi_x, mi_y);
+ } else {
+ dec_build_inter_predictors(pbi, xd, plane, 0, bw, bh,
+ 0, 0, bw, bh, mi_x, mi_y);
+ }
+ }
+}
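
To make the border-extension logic above concrete, here is a small standalone sketch (illustrative only, not the patch's API) of how one row is split into left-clamp, copy, and right-clamp regions when a b_w-wide fetch at horizontal offset x overruns a w-pixel-wide reference row, exactly as build_mc_border does per row.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One row of the extension: replicate the edge pixels outside [0, w). */
void extend_row_sketch(const uint8_t *ref_row, int w,
                       int x, int b_w, uint8_t *dst) {
  int left = x < 0 ? -x : 0;
  int right = x + b_w > w ? x + b_w - w : 0;
  int copy;

  if (left > b_w) left = b_w;
  if (right > b_w) right = b_w;
  copy = b_w - left - right;

  if (left) memset(dst, ref_row[0], left);
  if (copy) memcpy(dst + left, ref_row + x + left, copy);
  if (right) memset(dst + left + copy, ref_row[w - 1], right);
}

int main(void) {
  const uint8_t row[4] = { 10, 20, 30, 40 };  /* w = 4 */
  uint8_t out[8];
  /* Fetch 8 pixels starting 2 to the left of the frame: 2 clamped to 10,
   * 4 copied, 2 clamped to 40. */
  extend_row_sketch(row, 4, -2, 8, out);
  printf("%d %d %d %d %d %d %d %d\n",
         out[0], out[1], out[2], out[3], out[4], out[5], out[6], out[7]);
  return 0;  /* prints: 10 10 10 20 30 40 40 40 */
}
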
int *width, int *height);
BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb);
+void vp9_dec_build_inter_predictors_sb(struct VP9Decoder *const pbi,
+ MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
#ifdef __cplusplus
} // extern "C"
#endif
for (y = 0; y < ymis; y++)
for (x = 0; x < xmis; x++)
- cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+ cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+}
+
+static void copy_segment_id(const VP9_COMMON *cm,
+ const uint8_t *last_segment_ids,
+ uint8_t *current_segment_ids,
+ BLOCK_SIZE bsize, int mi_row, int mi_col) {
+ const int mi_offset = mi_row * cm->mi_cols + mi_col;
+ const int bw = num_8x8_blocks_wide_lookup[bsize];
+ const int bh = num_8x8_blocks_high_lookup[bsize];
+ const int xmis = MIN(cm->mi_cols - mi_col, bw);
+ const int ymis = MIN(cm->mi_rows - mi_row, bh);
+ int x, y;
+
+ for (y = 0; y < ymis; y++)
+ for (x = 0; x < xmis; x++)
+ current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ?
+ last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
}
static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
if (!seg->enabled)
return 0; // Default for disabled segmentation
- if (!seg->update_map)
+ if (!seg->update_map) {
+ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
+ bsize, mi_row, mi_col);
return 0;
+ }
segment_id = read_segment_id(r, seg);
set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
if (!seg->enabled)
return 0; // Default for disabled segmentation
- predicted_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
- bsize, mi_row, mi_col);
- if (!seg->update_map)
+ predicted_segment_id = cm->last_frame_seg_map ?
+ vp9_get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col) : 0;
+
+ if (!seg->update_map) {
+ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
+ bsize, mi_row, mi_col);
return predicted_segment_id;
+ }
if (seg->temporal_update) {
const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
}
}
-static void read_inter_block_mode_info(VP9_COMMON *const cm,
+static void fpm_sync(void *const data, int mi_row) {
+ VP9Decoder *const pbi = (VP9Decoder *)data;
+ vp9_frameworker_wait(pbi->frame_worker_owner, pbi->prev_buf,
+ mi_row << MI_BLOCK_SIZE_LOG2);
+}
+
+static void read_inter_block_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
const TileInfo *const tile,
MODE_INFO *const mi,
int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
if ((!vp9_is_valid_scale(&ref_buf->sf)))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Reference frame has invalid dimensions");
vp9_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
&ref_buf->sf);
vp9_find_mv_refs(cm, xd, tile, mi, frame, mbmi->ref_mvs[frame],
- mi_row, mi_col);
+ mi_row, mi_col, fpm_sync, (void *)pbi);
}
inter_mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8) {
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid usage of segement feature on small blocks");
return;
}
}
}
-static void read_inter_frame_mode_info(VP9_COMMON *const cm,
+static void read_inter_frame_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0].src_mi;
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
mbmi->tx_size = read_tx_size(cm, xd, !mbmi->skip || !inter_block, r);
if (inter_block)
- read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
+ read_inter_block_mode_info(pbi, xd, tile, mi, mi_row, mi_col, r);
else
read_intra_block_mode_info(cm, mi, r);
}
-void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
+void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0].src_mi;
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
if (frame_is_intra_only(cm))
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
else
- read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
+ read_inter_frame_mode_info(pbi, xd, tile, mi_row, mi_col, r);
for (h = 0; h < y_mis; ++h) {
MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
#ifndef VP9_DECODER_VP9_DECODEMV_H_
#define VP9_DECODER_VP9_DECODEMV_H_
+#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_reader.h"
#ifdef __cplusplus
struct TileInfo;
-void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
+void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
const struct TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r);
#include "./vpx_scale_rtcd.h"
#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/vpx_once.h"
#include "vpx_ports/vpx_timer.h"
#include "vpx_scale/vpx_scale.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_systemdependent.h"
+#include "vp9/common/vp9_thread.h"
#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_detokenize.h"
-#include "vp9/decoder/vp9_dthread.h"
-static void initialize_dec() {
- static int init_done = 0;
+static void initialize_dec(void) {
+ static volatile int init_done = 0;
if (!init_done) {
vp9_rtcd();
cm->mip = NULL;
}
-VP9Decoder *vp9_decoder_create() {
- VP9Decoder *const pbi = vpx_memalign(32, sizeof(*pbi));
- VP9_COMMON *const cm = pbi ? &pbi->common : NULL;
+VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
+ VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
+ VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm)
return NULL;
sizeof(*cm->frame_contexts)));
pbi->need_resync = 1;
- initialize_dec();
+ once(initialize_dec);
// Initialize the references to not point to any frame buffers.
vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ vpx_memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
cm->current_video_frame = 0;
pbi->ready_for_new_data = 1;
+ pbi->common.buffer_pool = pool;
+
cm->bit_depth = VPX_BITS_8;
cm->dequant_bit_depth = VPX_BITS_8;
}
void vp9_decoder_remove(VP9Decoder *pbi) {
- VP9_COMMON *const cm = &pbi->common;
int i;
vp9_get_worker_interface()->end(&pbi->lf_worker);
vp9_loop_filter_dealloc(&pbi->lf_row_sync);
}
- vp9_remove_common(cm);
vpx_free(pbi);
}
VP9_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd) {
RefBuffer *ref_buf = NULL;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
// TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
// encoder is using the frame buffers for. This is just a stub to keep the
const int free_fb = get_free_fb(cm);
// Decrease ref_count since it will be increased again in
// ref_cnt_fb() below.
- cm->frame_bufs[free_fb].ref_count--;
+ --frame_bufs[free_fb].ref_count;
// Manage the reference counters and copy image.
- ref_cnt_fb(cm->frame_bufs, ref_fb_ptr, free_fb);
- ref_buf->buf = &cm->frame_bufs[*ref_fb_ptr].buf;
+ ref_cnt_fb(frame_bufs, ref_fb_ptr, free_fb);
+ ref_buf->buf = &frame_bufs[*ref_fb_ptr].buf;
vp8_yv12_copy_frame(sd, ref_buf->buf);
}
static void swap_frame_buffers(VP9Decoder *pbi) {
int ref_index = 0, mask;
VP9_COMMON *const cm = &pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ lock_buffer_pool(pool);
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
- if (mask & 1) {
- const int old_idx = cm->ref_frame_map[ref_index];
- ref_cnt_fb(cm->frame_bufs, &cm->ref_frame_map[ref_index],
- cm->new_fb_idx);
- if (old_idx >= 0 && cm->frame_bufs[old_idx].ref_count == 0)
- cm->release_fb_cb(cm->cb_priv,
- &cm->frame_bufs[old_idx].raw_frame_buffer);
+ const int old_idx = cm->ref_frame_map[ref_index];
+ // The current thread releases its hold on the reference frame.
+ decrease_ref_count(old_idx, frame_bufs, pool);
+
+ // Release the reference frame in reference map.
+ if ((mask & 1) && old_idx >= 0) {
+ decrease_ref_count(old_idx, frame_bufs, pool);
}
+ cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
++ref_index;
}
+ // The current thread releases its hold on the reference frame.
+ for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
+ }
+ unlock_buffer_pool(pool);
+ pbi->hold_ref_buf = 0;
cm->frame_to_show = get_frame_new_buffer(cm);
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
+
+ if (!pbi->frame_parallel_decode || !cm->show_frame) {
+ lock_buffer_pool(pool);
+ --frame_bufs[cm->new_fb_idx].ref_count;
+ unlock_buffer_pool(pool);
+ }
// Invalidate these references until the next frame starts.
for (ref_index = 0; ref_index < 3; ref_index++)
int vp9_receive_compressed_data(VP9Decoder *pbi,
size_t size, const uint8_t **psource) {
- VP9_COMMON *const cm = &pbi->common;
+ VP9_COMMON *volatile const cm = &pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
const uint8_t *source = *psource;
int retcode = 0;
-
cm->error.error_code = VPX_CODEC_OK;
if (size == 0) {
// TODO(jkoleszar): Error concealment is undefined and non-normative
// at this point, but if it becomes so, [0] may not always be the correct
// thing to do here.
- if (cm->frame_refs[0].idx != INT_MAX)
+ if (cm->frame_refs[0].idx > 0)
cm->frame_refs[0].buf->corrupted = 1;
}
pbi->ready_for_new_data = 0;
// Check if the previous frame was a frame without any references to it.
- if (cm->new_fb_idx >= 0 && cm->frame_bufs[cm->new_fb_idx].ref_count == 0)
- cm->release_fb_cb(cm->cb_priv,
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer);
+ // Release frame buffer if not decoding in frame parallel mode.
+ if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
+ && frame_bufs[cm->new_fb_idx].ref_count == 0)
+ pool->release_fb_cb(pool->cb_priv,
+ &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
cm->new_fb_idx = get_free_fb(cm);
// Assign a MV array to the frame buffer.
- cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
+ cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
+
+ pbi->hold_ref_buf = 0;
+ if (pbi->frame_parallel_decode) {
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ vp9_frameworker_lock_stats(worker);
+ frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
+ // Reset decoding progress.
+ pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
+ pbi->cur_buf->row = -1;
+ pbi->cur_buf->col = -1;
+ vp9_frameworker_unlock_stats(worker);
+ } else {
+ pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
+ }
+
if (setjmp(cm->error.jmp)) {
const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ VP9_COMMON *const cm = &pbi->common;
int i;
- pbi->need_resync = 1;
cm->error.setjmp = 0;
+ pbi->ready_for_new_data = 1;
// Synchronize all threads immediately as a subsequent decode call may
// cause a resize invalidating some allocations.
winterface->sync(&pbi->tile_workers[i]);
}
- vp9_clear_system_state();
+ lock_buffer_pool(pool);
+ // Release all the reference buffers if worker thread is holding them.
+ if (pbi->hold_ref_buf == 1) {
+ int ref_index = 0, mask;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ // The current thread releases its hold on the reference frame.
+ decrease_ref_count(old_idx, frame_bufs, pool);
+
+ // Release the reference frame in reference map.
+ if ((mask & 1) && old_idx >= 0) {
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ ++ref_index;
+ }
- if (cm->new_fb_idx > 0 && cm->frame_bufs[cm->new_fb_idx].ref_count > 0)
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
+ // The current thread releases its hold on the reference frame.
+ for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ pbi->hold_ref_buf = 0;
+ }
+ // Release current frame.
+ decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
+ unlock_buffer_pool(pool);
+ vp9_clear_system_state();
return -1;
}
cm->error.setjmp = 1;
-
vp9_decode_frame(pbi, source, source + size, psource);
swap_frame_buffers(pbi);
vp9_clear_system_state();
- cm->last_width = cm->width;
- cm->last_height = cm->height;
-
if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
cm->prev_frame = cm->cur_frame;
+ if (cm->seg.enabled && !pbi->frame_parallel_decode)
+ vp9_swap_current_and_last_seg_map(cm);
}
- if (cm->show_frame)
- cm->current_video_frame++;
+ // Update progress in frame parallel decode.
+ if (pbi->frame_parallel_decode) {
+ // Need to lock the mutex here as another thread may
+ // be accessing this buffer.
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ FrameWorkerData *const frame_worker_data = worker->data1;
+ vp9_frameworker_lock_stats(worker);
+
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ }
+ frame_worker_data->frame_decoded = 1;
+ frame_worker_data->frame_context_ready = 1;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ } else {
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+ if (cm->show_frame) {
+ cm->current_video_frame++;
+ }
+ }
cm->error.setjmp = 0;
return retcode;
if (!cm->show_frame)
return ret;
+ pbi->ready_for_new_data = 1;
+
#if CONFIG_VP9_POSTPROC
if (!cm->show_existing_frame) {
ret = vp9_post_proc_frame(cm, sd, flags);
#include "vpx/vpx_codec.h"
#include "vpx_scale/yv12config.h"
-
+#include "vp9/common/vp9_loopfilter_thread.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_ppflags.h"
#include "vp9/common/vp9_thread.h"
-
#include "vp9/decoder/vp9_dthread.h"
+#include "vp9/decoder/vp9_reader.h"
#ifdef __cplusplus
extern "C" {
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
} TileData;
+typedef struct TileWorkerData {
+ struct VP9Decoder *pbi;
+ vp9_reader bit_reader;
+ DECLARE_ALIGNED(16, MACROBLOCKD, xd);
+ struct vpx_internal_error_info error_info;
+} TileWorkerData;
+
typedef struct VP9Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
int frame_parallel_decode; // frame-based threading.
+ // TODO(hkuang): Combine this with cur_buf in macroblockd as they are
+ // the same.
+ RefCntBuffer *cur_buf; // Current decoding frame buffer.
+ RefCntBuffer *prev_buf; // Previous decoding frame buffer.
+
+ VP9Worker *frame_worker_owner; // frame_worker that owns this pbi.
VP9Worker lf_worker;
VP9Worker *tile_workers;
TileWorkerData *tile_worker_data;
int max_threads;
int inv_tile_order;
- int need_resync; // wait for key/intra-only frame
+ int need_resync; // wait for key/intra-only frame.
+ int hold_ref_buf; // hold the reference buffer.
} VP9Decoder;
int vp9_receive_compressed_data(struct VP9Decoder *pbi,
VP9_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
-struct VP9Decoder *vp9_decoder_create();
-
-void vp9_decoder_remove(struct VP9Decoder *pbi);
-
static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
void *decrypt_state,
const uint8_t *data) {
vpx_decrypt_cb decrypt_cb,
void *decrypt_state);
+struct VP9Decoder *vp9_decoder_create(BufferPool *const pool);
+
+void vp9_decoder_remove(struct VP9Decoder *pbi);
+
+static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
+ BufferPool *const pool) {
+ if (idx >= 0) {
+ --frame_bufs[idx].ref_count;
+ // A worker may only get a free framebuffer index when calling get_free_fb.
+ // But the private buffer is not set up until the frame header has been
+ // decoded. So if an error happens while decoding the header, the
+ // frame_bufs entry will not have a valid priv buffer.
+ if (frame_bufs[idx].ref_count == 0 &&
+ frame_bufs[idx].raw_frame_buffer.priv) {
+ pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer);
+ }
+ }
+}
+
#ifdef __cplusplus
} // extern "C"
#endif
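
A hedged sketch of the reference-map bookkeeping this patch threads through read_uncompressed_header and swap_frame_buffers: each bit of refresh_frame_flags decides whether a slot of the reference map takes the newly decoded buffer or keeps its old one. The real code additionally adjusts per-buffer ref counts under the buffer-pool lock (see decrease_ref_count above); that part is elided here and the names below are illustrative.

#include <assert.h>

#define REF_FRAMES 8

/* Illustrative only: build the post-frame reference map. Slots whose bit is
 * set in refresh_frame_flags point at the new buffer; the rest keep their
 * previous buffer index. */
void next_ref_map_sketch(const int *ref_frame_map, int new_fb_idx,
                         int refresh_frame_flags, int *next_ref_frame_map) {
  int i;
  for (i = 0; i < REF_FRAMES; ++i) {
    if (refresh_frame_flags & (1 << i))
      next_ref_frame_map[i] = new_fb_idx;
    else
      next_ref_frame_map[i] = ref_frame_map[i];
  }
}

int main(void) {
  const int cur[REF_FRAMES] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int next[REF_FRAMES];
  /* Refresh slots 0 and 4 with the newly decoded buffer index 9. */
  next_ref_map_sketch(cur, 9, (1 << 0) | (1 << 4), next);
  assert(next[0] == 9 && next[1] == 1 && next[4] == 9 && next[7] == 7);
  return 0;
}
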
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
+#if CONFIG_COEFFICIENT_RANGE_CHECKING
+#include "vp9/common/vp9_idct.h"
+#endif
#include "vp9/decoder/vp9_detokenize.h"
#define INCREMENT_COUNT(token) \
do { \
if (!cm->frame_parallel_decoding_mode) \
- ++coef_counts[band][ctx][token]; \
+ ++coef_counts[band][ctx][token]; \
} while (0)
static INLINE int read_coeff(const vp9_prob *probs, int n, vp9_reader *r) {
}
v = (val * dqv) >> dq_shift;
#if CONFIG_COEFFICIENT_RANGE_CHECKING
+#if CONFIG_VP9_HIGHBITDEPTH
+ dqcoeff[scan[c]] = highbd_check_range((vp9_read_bit(r) ? -v : v),
+ cm->bit_depth);
+#else
dqcoeff[scan[c]] = check_range(vp9_read_bit(r) ? -v : v);
+#endif // CONFIG_VP9_HIGHBITDEPTH
#else
dqcoeff[scan[c]] = vp9_read_bit(r) ? -v : v;
-#endif
+#endif // CONFIG_COEFFICIENT_RANGE_CHECKING
token_cache[scan[c]] = vp9_pt_energy_class[token];
++c;
ctx = get_coef_context(nb, token_cache, c);
*/
#include "./vpx_config.h"
-
#include "vpx_mem/vpx_mem.h"
-
#include "vp9/common/vp9_reconinter.h"
-
#include "vp9/decoder/vp9_dthread.h"
#include "vp9/decoder/vp9_decoder.h"
-#if CONFIG_MULTITHREAD
-static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
- const int kMaxTryLocks = 4000;
- int locked = 0;
- int i;
+// #define DEBUG_THREAD
- for (i = 0; i < kMaxTryLocks; ++i) {
- if (!pthread_mutex_trylock(mutex)) {
- locked = 1;
- break;
- }
- }
-
- if (!locked)
- pthread_mutex_lock(mutex);
+// TODO(hkuang): Clean up all the #ifdef in this file.
+void vp9_frameworker_lock_stats(VP9Worker *const worker) {
+#if CONFIG_MULTITHREAD
+ FrameWorkerData *const worker_data = worker->data1;
+ pthread_mutex_lock(&worker_data->stats_mutex);
+#else
+ (void)worker;
+#endif
}
-#endif // CONFIG_MULTITHREAD
-static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) {
+void vp9_frameworker_unlock_stats(VP9Worker *const worker) {
#if CONFIG_MULTITHREAD
- const int nsync = lf_sync->sync_range;
-
- if (r && !(c & (nsync - 1))) {
- pthread_mutex_t *const mutex = &lf_sync->mutex_[r - 1];
- mutex_lock(mutex);
-
- while (c > lf_sync->cur_sb_col[r - 1] - nsync) {
- pthread_cond_wait(&lf_sync->cond_[r - 1], mutex);
- }
- pthread_mutex_unlock(mutex);
- }
+ FrameWorkerData *const worker_data = worker->data1;
+ pthread_mutex_unlock(&worker_data->stats_mutex);
#else
- (void)lf_sync;
- (void)r;
- (void)c;
-#endif // CONFIG_MULTITHREAD
+ (void)worker;
+#endif
}
-static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c,
- const int sb_cols) {
+void vp9_frameworker_signal_stats(VP9Worker *const worker) {
#if CONFIG_MULTITHREAD
- const int nsync = lf_sync->sync_range;
- int cur;
- // Only signal when there are enough filtered SB for next row to run.
- int sig = 1;
-
- if (c < sb_cols - 1) {
- cur = c;
- if (c % nsync)
- sig = 0;
- } else {
- cur = sb_cols + nsync;
- }
-
- if (sig) {
- mutex_lock(&lf_sync->mutex_[r]);
-
- lf_sync->cur_sb_col[r] = cur;
-
- pthread_cond_signal(&lf_sync->cond_[r]);
- pthread_mutex_unlock(&lf_sync->mutex_[r]);
- }
+ FrameWorkerData *const worker_data = worker->data1;
+ // TODO(hkuang): Investigate using broadcast or signal.
+ pthread_cond_signal(&worker_data->stats_cond);
#else
- (void)lf_sync;
- (void)r;
- (void)c;
- (void)sb_cols;
-#endif // CONFIG_MULTITHREAD
+ (void)worker;
+#endif
}
-// Implement row loopfiltering for each thread.
-static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer,
- VP9_COMMON *const cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int start, int stop, int y_only,
- VP9LfSync *const lf_sync) {
- const int num_planes = y_only ? 1 : MAX_MB_PLANE;
- int r, c; // SB row and col
- const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
-
- for (r = start; r < stop; r += lf_sync->num_workers) {
- const int mi_row = r << MI_BLOCK_SIZE_LOG2;
- MODE_INFO *const mi = cm->mi + mi_row * cm->mi_stride;
-
- for (c = 0; c < sb_cols; ++c) {
- const int mi_col = c << MI_BLOCK_SIZE_LOG2;
- LOOP_FILTER_MASK lfm;
- int plane;
+// TODO(hkuang): Remove worker parameter as it is only used in debug code.
+void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
+ int row) {
+#if CONFIG_MULTITHREAD
+ if (!ref_buf)
+ return;
- sync_read(lf_sync, r, c);
+ // Enabling the following line of code will trigger a harmless tsan error
+ // but gives the best performance.
+ // if (ref_buf->row >= row && ref_buf->buf.corrupted != 1) return;
- vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
- vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+ {
+ // Find the worker thread that owns the reference frame. If the reference
+ // frame has been fully decoded, it may not have an owner.
+ VP9Worker *const ref_worker = ref_buf->frame_worker_owner;
+ FrameWorkerData *const ref_worker_data =
+ (FrameWorkerData *)ref_worker->data1;
+ const VP9Decoder *const pbi = ref_worker_data->pbi;
+
+#ifdef DEBUG_THREAD
+ {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ printf("%d %p worker is waiting for %d %p worker (%d) ref %d \r\n",
+ worker_data->worker_id, worker, ref_worker_data->worker_id,
+ ref_buf->frame_worker_owner, row, ref_buf->row);
+ }
+#endif
- for (plane = 0; plane < num_planes; ++plane) {
- vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
- }
+ vp9_frameworker_lock_stats(ref_worker);
+ while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
+ ref_buf->buf.corrupted != 1) {
+ pthread_cond_wait(&ref_worker_data->stats_cond,
+ &ref_worker_data->stats_mutex);
+ }
- sync_write(lf_sync, r, c, sb_cols);
+ if (ref_buf->buf.corrupted == 1) {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ vp9_frameworker_unlock_stats(ref_worker);
+ vpx_internal_error(&worker_data->pbi->common.error,
+ VPX_CODEC_CORRUPT_FRAME,
+ "Worker %p failed to decode frame", worker);
}
+ vp9_frameworker_unlock_stats(ref_worker);
}
+#else
+ (void)worker;
+ (void)ref_buf;
+ (void)row;
+#endif // CONFIG_MULTITHREAD
}
-// Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP9LfSync *const lf_sync,
- LFWorkerData *const lf_data) {
- loop_filter_rows_mt(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
- lf_data->start, lf_data->stop, lf_data->y_only, lf_sync);
- return 1;
-}
-
-// VP9 decoder: Implement multi-threaded loopfilter that uses the tile
-// threads.
-void vp9_loop_filter_frame_mt(VP9LfSync *lf_sync,
- YV12_BUFFER_CONFIG *frame,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- VP9_COMMON *cm,
- VP9Worker *workers, int nworkers,
- int frame_filter_level,
- int y_only) {
- const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
- // Number of superblock rows and cols
- const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
- const int tile_cols = 1 << cm->log2_tile_cols;
- const int num_workers = MIN(nworkers, tile_cols);
- int i;
-
- if (!frame_filter_level) return;
-
- if (!lf_sync->sync_range || cm->last_height != cm->height ||
- num_workers > lf_sync->num_workers) {
- vp9_loop_filter_dealloc(lf_sync);
- vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
- }
-
- vp9_loop_filter_frame_init(cm, frame_filter_level);
-
- // Initialize cur_sb_col to -1 for all SB rows.
- vpx_memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
-
- // Set up loopfilter thread data.
- // The decoder is capping num_workers because it has been observed that using
- // more threads on the loopfilter than there are cores will hurt performance
- // on Android. This is because the system will only schedule the tile decode
- // workers on cores equal to the number of tile columns. Then if the decoder
- // tries to use more threads for the loopfilter, it will hurt performance
- // because of contention. If the multithreading code changes in the future
- // then the number of workers used by the loopfilter should be revisited.
- for (i = 0; i < num_workers; ++i) {
- VP9Worker *const worker = &workers[i];
- LFWorkerData *const lf_data = &lf_sync->lfdata[i];
-
- worker->hook = (VP9WorkerHook)loop_filter_row_worker;
- worker->data1 = lf_sync;
- worker->data2 = lf_data;
-
- // Loopfilter data
- vp9_loop_filter_data_reset(lf_data, frame, cm, planes);
- lf_data->start = i;
- lf_data->stop = sb_rows;
- lf_data->y_only = y_only;
-
- // Start loopfiltering
- if (i == num_workers - 1) {
- winterface->execute(worker);
- } else {
- winterface->launch(worker);
- }
- }
+void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+#if CONFIG_MULTITHREAD
+ VP9Worker *worker = buf->frame_worker_owner;
- // Wait till all rows are finished
- for (i = 0; i < num_workers; ++i) {
- winterface->sync(&workers[i]);
+#ifdef DEBUG_THREAD
+ {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ printf("%d %p worker decode to (%d) \r\n", worker_data->worker_id,
+ buf->frame_worker_owner, row);
}
-}
+#endif
-// Set up nsync by width.
-static int get_sync_range(int width) {
- // nsync numbers are picked by testing. For example, for 4k
- // video, using 4 gives best performance.
- if (width < 640)
- return 1;
- else if (width <= 1280)
- return 2;
- else if (width <= 4096)
- return 4;
- else
- return 8;
+ vp9_frameworker_lock_stats(worker);
+ buf->row = row;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+#else
+ (void)buf;
+ (void)row;
+#endif // CONFIG_MULTITHREAD
}
-// Allocate memory for lf row synchronization
-void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
- int width, int num_workers) {
- lf_sync->rows = rows;
+void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
+ VP9Worker *const src_worker) {
#if CONFIG_MULTITHREAD
- {
- int i;
-
- CHECK_MEM_ERROR(cm, lf_sync->mutex_,
- vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
- if (lf_sync->mutex_) {
- for (i = 0; i < rows; ++i) {
- pthread_mutex_init(&lf_sync->mutex_[i], NULL);
- }
- }
+ FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
+ FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
+ VP9_COMMON *const src_cm = &src_worker_data->pbi->common;
+ VP9_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+ int i;
- CHECK_MEM_ERROR(cm, lf_sync->cond_,
- vpx_malloc(sizeof(*lf_sync->cond_) * rows));
- if (lf_sync->cond_) {
- for (i = 0; i < rows; ++i) {
- pthread_cond_init(&lf_sync->cond_[i], NULL);
- }
- }
+ // Wait until source frame's context is ready.
+ vp9_frameworker_lock_stats(src_worker);
+ while (!src_worker_data->frame_context_ready) {
+ pthread_cond_wait(&src_worker_data->stats_cond,
+ &src_worker_data->stats_mutex);
}
-#endif // CONFIG_MULTITHREAD
-
- CHECK_MEM_ERROR(cm, lf_sync->lfdata,
- vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
- lf_sync->num_workers = num_workers;
- CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
- vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
-
- // Set up nsync.
- lf_sync->sync_range = get_sync_range(width);
-}
-
-// Deallocate lf synchronization related mutex and data
-void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) {
- if (lf_sync != NULL) {
-#if CONFIG_MULTITHREAD
- int i;
+ // src worker may have already finished decoding a frame and swapped the mi.
+ // TODO(hkuang): Remove the following code after implementing no-ModeInfo decoding.
+ if (src_worker_data->frame_decoded) {
+ dst_cm->prev_mip = src_cm->prev_mip;
+ dst_cm->prev_mi = src_cm->prev_mi;
+ } else {
+ dst_cm->prev_mip = src_cm->mip;
+ dst_cm->prev_mi = src_cm->mi;
+ }
- if (lf_sync->mutex_ != NULL) {
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_mutex_destroy(&lf_sync->mutex_[i]);
- }
- vpx_free(lf_sync->mutex_);
- }
- if (lf_sync->cond_ != NULL) {
- for (i = 0; i < lf_sync->rows; ++i) {
- pthread_cond_destroy(&lf_sync->cond_[i]);
- }
- vpx_free(lf_sync->cond_);
- }
+ dst_cm->last_frame_seg_map = src_cm->seg.enabled ?
+ src_cm->current_frame_seg_map : src_cm->last_frame_seg_map;
+ dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
+ vp9_frameworker_unlock_stats(src_worker);
+
+ dst_worker_data->pbi->prev_buf =
+ src_worker_data->pbi->common.show_existing_frame ?
+ NULL : src_worker_data->pbi->cur_buf;
+
+ dst_cm->prev_frame = src_cm->show_existing_frame ?
+ src_cm->prev_frame : src_cm->cur_frame;
+ dst_cm->last_width = !src_cm->show_existing_frame ?
+ src_cm->width : src_cm->last_width;
+ dst_cm->last_height = !src_cm->show_existing_frame ?
+ src_cm->height : src_cm->last_height;
+ dst_cm->display_width = src_cm->display_width;
+ dst_cm->display_height = src_cm->display_height;
+ dst_cm->subsampling_x = src_cm->subsampling_x;
+ dst_cm->subsampling_y = src_cm->subsampling_y;
+ dst_cm->last_show_frame = !src_cm->show_existing_frame ?
+ src_cm->show_frame : src_cm->last_show_frame;
+ dst_cm->last_frame_type = src_cm->last_frame_type;
+ dst_cm->frame_type = src_cm->frame_type;
+ dst_cm->y_dc_delta_q = src_cm->y_dc_delta_q;
+ dst_cm->uv_dc_delta_q = src_cm->uv_dc_delta_q;
+ dst_cm->uv_ac_delta_q = src_cm->uv_ac_delta_q;
+ dst_cm->base_qindex = src_cm->base_qindex;
+
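+ // Copy the reference map as it will be once the source frame applies its
+ // reference updates (next_ref_frame_map); that is the state the destination
+ // worker should start from.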
+ for (i = 0; i < REF_FRAMES; ++i)
+ dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
+
+ memcpy(dst_cm->lf_info.lfthr, src_cm->lf_info.lfthr,
+ (MAX_LOOP_FILTER + 1) * sizeof(loop_filter_thresh));
+ dst_cm->lf.last_sharpness_level = src_cm->lf.sharpness_level;
+ dst_cm->lf.filter_level = src_cm->lf.filter_level;
+ memcpy(dst_cm->lf.ref_deltas, src_cm->lf.ref_deltas, MAX_REF_LF_DELTAS);
+ memcpy(dst_cm->lf.mode_deltas, src_cm->lf.mode_deltas, MAX_MODE_LF_DELTAS);
+ dst_cm->seg = src_cm->seg;
+ memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
+ FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
+#else
+ (void) dst_worker;
+ (void) src_worker;
#endif // CONFIG_MULTITHREAD
- vpx_free(lf_sync->lfdata);
- vpx_free(lf_sync->cur_sb_col);
- // clear the structure as the source of this call may be a resize in which
- // case this call will be followed by an _alloc() which may fail.
- vp9_zero(*lf_sync);
- }
}
#include "./vpx_config.h"
#include "vp9/common/vp9_thread.h"
-#include "vp9/decoder/vp9_reader.h"
+#include "vpx/internal/vpx_codec_internal.h"
struct VP9Common;
struct VP9Decoder;
-typedef struct TileWorkerData {
- struct VP9Common *cm;
- vp9_reader bit_reader;
- DECLARE_ALIGNED(16, struct macroblockd, xd);
-} TileWorkerData;
+// Data for the FrameWorker thread. It contains the worker's state and the
+// decoder structures needed to decode a frame.
+typedef struct FrameWorkerData {
+ struct VP9Decoder *pbi;
+ const uint8_t *data;
+ const uint8_t *data_end;
+ size_t data_size;
+ void *user_priv;
+ int result;
+ int worker_id;
+
+ // scratch_buffer is used only in frame-parallel mode, to hold a copy of the
+ // compressed data.
+ uint8_t *scratch_buffer;
+ size_t scratch_buffer_size;
-// Loopfilter row synchronization
-typedef struct VP9LfSyncData {
#if CONFIG_MULTITHREAD
- pthread_mutex_t *mutex_;
- pthread_cond_t *cond_;
+ pthread_mutex_t stats_mutex;
+ pthread_cond_t stats_cond;
#endif
- // Allocate memory to store the loop-filtered superblock index in each row.
- int *cur_sb_col;
- // The optimal sync_range for different resolution and platform should be
- // determined by testing. Currently, it is chosen to be a power-of-2 number.
- int sync_range;
- int rows;
-
- // Row-based parallel loopfilter data
- LFWorkerData *lfdata;
- int num_workers;
-} VP9LfSync;
-
-// Allocate memory for loopfilter row synchronization.
-void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
- int width, int num_workers);
-
-// Deallocate loopfilter synchronization related mutex and data.
-void vp9_loop_filter_dealloc(VP9LfSync *lf_sync);
-
-// Multi-threaded loopfilter that uses the tile threads.
-void vp9_loop_filter_frame_mt(VP9LfSync *lf_sync,
- YV12_BUFFER_CONFIG *frame,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- struct VP9Common *cm,
- VP9Worker *workers, int num_workers,
- int frame_filter_level,
- int y_only);
+
+ int frame_context_ready; // Current frame's context is ready to read.
+ int frame_decoded; // Finished decoding current frame.
+} FrameWorkerData;
+
+void vp9_frameworker_lock_stats(VP9Worker *const worker);
+void vp9_frameworker_unlock_stats(VP9Worker *const worker);
+void vp9_frameworker_signal_stats(VP9Worker *const worker);
+
+// Wait until ref_buf has been decoded down to row (in real pixel units).
+// Note: the worker may already have finished decoding ref_buf and released it
+// in order to start decoding the next frame, so the caller must check whether
+// the worker is still decoding ref_buf.
+void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
+ int row);
+
+// FrameWorker broadcasts its decoding progress so other workers that are
+// waiting on it can resume decoding.
+void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row);
+
+// Copy necessary decoding context from src worker to dst worker.
+void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
+ VP9Worker *const src_worker);
#endif // VP9_DECODER_VP9_DTHREAD_H_
--- /dev/null
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+
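+// Sum the eight 16-bit lanes of v_16x8 and return the 32-bit total.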
+static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
+ const uint32x4_t a = vpaddlq_u16(v_16x8);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
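+// Compute the average of an 8x8 block of pixels with row stride p.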
+unsigned int vp9_avg_8x8_neon(const uint8_t *s, int p) {
+ uint8x8_t v_s0 = vld1_u8(s);
+ const uint8x8_t v_s1 = vld1_u8(s + p);
+ uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
+
+ v_s0 = vld1_u8(s + 2 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 3 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 4 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 5 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 6 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
+ v_s0 = vld1_u8(s + 7 * p);
+ v_sum = vaddw_u8(v_sum, v_s0);
+
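+ // 64 pixels were summed: add 32 and shift right by 6 for a rounded average.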
+ return (horizontal_add_u16x8(v_sum) + 32) >> 6;
+}
}
}
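+// Fused forward 8x8 DCT and fast (fp) quantization. The transform output is
+// kept in a local buffer, so the coeff_ptr argument is unused here.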
+void vp9_fdct8x8_quant_neon(const int16_t *input, int stride,
+ int16_t* coeff_ptr, intptr_t n_coeffs,
+ int skip_block, const int16_t* zbin_ptr,
+ const int16_t* round_ptr, const int16_t* quant_ptr,
+ const int16_t* quant_shift_ptr,
+ int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
+ const int16_t* dequant_ptr, uint16_t* eob_ptr,
+ const int16_t* scan_ptr,
+ const int16_t* iscan_ptr) {
+ int16_t temp_buffer[64];
+ (void)coeff_ptr;
+
+ vp9_fdct8x8_neon(input, temp_buffer, stride);
+ vp9_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+ quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
+ dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
+}
+
void vp9_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
int i;
// stage 1
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
// TODO(jingning) Decide the need of these arguments after the
// quantization process is completed.
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)scan;
if (!skip_block) {
--- /dev/null
+/*
+ * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
+
+#include "vpx/vpx_integer.h"
+
+static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
+ const uint16x8_t vec_hi) {
+ const uint32x4_t vec_l_lo = vaddl_u16(vget_low_u16(vec_lo),
+ vget_high_u16(vec_lo));
+ const uint32x4_t vec_l_hi = vaddl_u16(vget_low_u16(vec_hi),
+ vget_high_u16(vec_hi));
+ const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
+// Calculate the absolute difference of 64 bytes from vec_src_00, vec_src_16,
+// vec_src_32, vec_src_48 and ref. Accumulate partial sums in vec_sum_ref_lo
+// and vec_sum_ref_hi.
+static void sad_neon_64(const uint8x16_t vec_src_00,
+ const uint8x16_t vec_src_16,
+ const uint8x16_t vec_src_32,
+ const uint8x16_t vec_src_48,
+ const uint8_t *ref,
+ uint16x8_t *vec_sum_ref_lo,
+ uint16x8_t *vec_sum_ref_hi) {
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+ const uint8x16_t vec_ref_32 = vld1q_u8(ref + 32);
+ const uint8x16_t vec_ref_48 = vld1q_u8(ref + 48);
+
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_32),
+ vget_low_u8(vec_ref_32));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_32),
+ vget_high_u8(vec_ref_32));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_48),
+ vget_low_u8(vec_ref_48));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_48),
+ vget_high_u8(vec_ref_48));
+}
+
+// Calculate the absolute difference of 32 bytes from vec_src_00, vec_src_16,
+// and ref. Accumulate partial sums in vec_sum_ref_lo and vec_sum_ref_hi.
+static void sad_neon_32(const uint8x16_t vec_src_00,
+ const uint8x16_t vec_src_16,
+ const uint8_t *ref,
+ uint16x8_t *vec_sum_ref_lo,
+ uint16x8_t *vec_sum_ref_hi) {
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+}
+
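+// Each vp9_sadNxNx4d function below computes the sum of absolute differences
+// of one source block against four reference blocks and writes the four sums
+// to res[0]..res[3].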
+void vp9_sad64x64x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t* const ref[4], int ref_stride,
+ unsigned int *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 64; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+ const uint8x16_t vec_src_32 = vld1q_u8(src + 32);
+ const uint8x16_t vec_src_48 = vld1q_u8(src + 48);
+
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref0,
+ &vec_sum_ref0_lo, &vec_sum_ref0_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref1,
+ &vec_sum_ref1_lo, &vec_sum_ref1_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref2,
+ &vec_sum_ref2_lo, &vec_sum_ref2_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref3,
+ &vec_sum_ref3_lo, &vec_sum_ref3_hi);
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
+
+void vp9_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t* const ref[4], int ref_stride,
+ unsigned int *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 32; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+
+ sad_neon_32(vec_src_00, vec_src_16, ref0,
+ &vec_sum_ref0_lo, &vec_sum_ref0_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref1,
+ &vec_sum_ref1_lo, &vec_sum_ref1_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref2,
+ &vec_sum_ref2_lo, &vec_sum_ref2_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref3,
+ &vec_sum_ref3_lo, &vec_sum_ref3_hi);
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
+
+void vp9_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t* const ref[4], int ref_stride,
+ unsigned int *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 16; ++i) {
+ const uint8x16_t vec_src = vld1q_u8(src);
+ const uint8x16_t vec_ref0 = vld1q_u8(ref0);
+ const uint8x16_t vec_ref1 = vld1q_u8(ref1);
+ const uint8x16_t vec_ref2 = vld1q_u8(ref2);
+ const uint8x16_t vec_ref3 = vld1q_u8(ref3);
+
+ vec_sum_ref0_lo = vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src),
+ vget_low_u8(vec_ref0));
+ vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref0));
+ vec_sum_ref1_lo = vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src),
+ vget_low_u8(vec_ref1));
+ vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref1));
+ vec_sum_ref2_lo = vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src),
+ vget_low_u8(vec_ref2));
+ vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref2));
+ vec_sum_ref3_lo = vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src),
+ vget_low_u8(vec_ref3));
+ vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref3));
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
#include <arm_neon.h>
#include "./vp9_rtcd.h"
+#include "./vpx_config.h"
#include "vpx_ports/mem.h"
#include "vpx/vpx_integer.h"
enum { kWidth32 = 32 };
enum { kHeight32 = 32 };
enum { kHeight32PlusOne = 33 };
+enum { kWidth64 = 64 };
+enum { kHeight64 = 64 };
+enum { kHeight64PlusOne = 65 };
enum { kPixelStepOne = 1 };
enum { kAlign16 = 16 };
return vget_lane_s32(c, 0);
}
+// w * h must be less than 2048 or local variable v_sum may overflow.
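+// (v_sum is an int16x8_t accumulator: each lane gathers roughly (w * h) / 8
+// signed pixel differences of magnitude up to 255, so at w * h == 2048 a lane
+// can reach 256 * 255 = 65280, past the int16_t limit of 32767.)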
static void variance_neon_w8(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
- int w, int h, unsigned int *sse, int *sum) {
+ int w, int h, uint32_t *sse, int *sum) {
int i, j;
int16x8_t v_sum = vdupq_n_s16(0);
int32x4_t v_sse_lo = vdupq_n_s32(0);
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, kWidth8, kHeight8, sse, &sum);
- return *sse - (((int64_t)sum * sum) / (kWidth8 * kHeight8));
+ return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / (8 * 8)
}
void vp9_get16x16var_neon(const uint8_t *src_ptr, int source_stride,
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, kWidth16, kHeight16, sse, &sum);
- return *sse - (((int64_t)sum * sum) / (kWidth16 * kHeight16));
+ return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / (16 * 16)
}
static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
unsigned int *sse) {
int sum;
variance_neon_w8(a, a_stride, b, b_stride, kWidth32, kHeight32, sse, &sum);
- return *sse - (((int64_t)sum * sum) / (kWidth32 * kHeight32));
+ return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / (32 * 32)
+}
+
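+// The larger block sizes below are accumulated from sub-blocks of at most
+// 1024 pixels each, so the 16-bit accumulator in variance_neon_w8 cannot
+// overflow.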
+unsigned int vp9_variance32x64_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+ variance_neon_w8(a, a_stride, b, b_stride, kWidth32, kHeight32, &sse1, &sum1);
+ variance_neon_w8(a + (kHeight32 * a_stride), a_stride,
+ b + (kHeight32 * b_stride), b_stride, kWidth32, kHeight32,
+ &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / (32 * 64)
+}
+
+unsigned int vp9_variance64x32_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+ variance_neon_w8(a, a_stride, b, b_stride, kWidth64, kHeight16, &sse1, &sum1);
+ variance_neon_w8(a + (kHeight16 * a_stride), a_stride,
+ b + (kHeight16 * b_stride), b_stride, kWidth64, kHeight16,
+ &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / (64 * 32)
+}
+
+unsigned int vp9_variance64x64_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+
+ variance_neon_w8(a, a_stride, b, b_stride, kWidth64, kHeight16, &sse1, &sum1);
+ variance_neon_w8(a + (kHeight16 * a_stride), a_stride,
+ b + (kHeight16 * b_stride), b_stride, kWidth64, kHeight16,
+ &sse2, &sum2);
+ sse1 += sse2;
+ sum1 += sum2;
+
+ variance_neon_w8(a + (kHeight16 * 2 * a_stride), a_stride,
+ b + (kHeight16 * 2 * b_stride), b_stride,
+ kWidth64, kHeight16, &sse2, &sum2);
+ sse1 += sse2;
+ sum1 += sum2;
+
+ variance_neon_w8(a + (kHeight16 * 3 * a_stride), a_stride,
+ b + (kHeight16 * 3 * b_stride), b_stride,
+ kWidth64, kHeight16, &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / (64 * 64)
}
unsigned int vp9_sub_pixel_variance32x32_neon(const uint8_t *src,
kWidth32, BILINEAR_FILTERS_2TAP(yoffset));
return vp9_variance32x32_neon(temp2, kWidth32, dst, dst_stride, sse);
}
+
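+// 64x64 sub-pixel variance: bilinear-filter horizontally (xoffset) into
+// fdata3, then vertically (yoffset) into temp2, and take the plain 64x64
+// variance of temp2 against dst.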
+unsigned int vp9_sub_pixel_variance64x64_neon(const uint8_t *src,
+ int src_stride,
+ int xoffset,
+ int yoffset,
+ const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse) {
+ DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight64 * kWidth64);
+ DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight64PlusOne * kWidth64);
+
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, kPixelStepOne,
+ kHeight64PlusOne, kWidth64,
+ BILINEAR_FILTERS_2TAP(xoffset));
+ var_filter_block2d_bil_w16(fdata3, temp2, kWidth64, kWidth64, kHeight64,
+ kWidth64, BILINEAR_FILTERS_2TAP(yoffset));
+ return vp9_variance64x64_neon(temp2, kWidth64, dst, dst_stride, sse);
+}
int percent_refresh;
// Maximum q-delta as percentage of base q.
int max_qdelta_perc;
- // Block size below which we don't apply cyclic refresh.
- BLOCK_SIZE min_block_size;
// Superblock starting index for cycling through the frame.
int sb_index;
// Controls how long block will need to wait to be refreshed again, in
int rdmult;
// Cyclic refresh map.
signed char *map;
- // Thresholds applied to projected rate/distortion of the superblock.
+ // Thresholds applied to the projected rate/distortion of the coding block
+ // when deciding whether the block should be refreshed.
int64_t thresh_rate_sb;
int64_t thresh_dist_sb;
+ // Threshold applied to the motion vector (in units of 1/8 pel) of the
+ // coding block when deciding whether the block should be refreshed.
+ int16_t motion_thresh;
// Rate target ratio to set q delta.
double rate_ratio_qdelta;
};
// mode, and rate/distortion.
static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
const MB_MODE_INFO *mbmi,
- BLOCK_SIZE bsize, int use_rd,
- int64_t rate_sb) {
- if (use_rd) {
- MV mv = mbmi->mv[0].as_mv;
- // If projected rate is below the thresh_rate (well below target,
- // so undershoot expected), accept it for lower-qp coding.
- if (rate_sb < cr->thresh_rate_sb)
- return 1;
- // Otherwise, reject the block for lower-qp coding if any of the following:
- // 1) mode uses large mv
- // 2) mode is an intra-mode (we may want to allow some of this under
- // another thresh_dist)
- else if (mv.row > 32 || mv.row < -32 ||
- mv.col > 32 || mv.col < -32 || !is_inter_block(mbmi))
- return 0;
- else
- return 1;
- } else {
- // Rate/distortion not used for update.
- if (bsize < cr->min_block_size ||
- mbmi->mv[0].as_int != 0 ||
- !is_inter_block(mbmi))
- return 0;
- else
- return 1;
- }
+ int64_t rate,
+ int64_t dist) {
+ MV mv = mbmi->mv[0].as_mv;
+ // If projected rate is below the thresh_rate, accept it for lower-qp coding.
+ // Otherwise, reject the block for lower-qp coding if projected distortion
+ // is above the threshold, and any of the following is true:
+ // 1) mode uses large mv
+ // 2) mode is an intra-mode
+ if (rate < cr->thresh_rate_sb)
+ return 1;
+ else if (dist > cr->thresh_dist_sb &&
+ (mv.row > cr->motion_thresh || mv.row < -cr->motion_thresh ||
+ mv.col > cr->motion_thresh || mv.col < -cr->motion_thresh ||
+ !is_inter_block(mbmi)))
+ return 0;
+ else
+ return 1;
}
// Compute delta-q for the segment.
void vp9_cyclic_refresh_update_segment(VP9_COMP *const cpi,
MB_MODE_INFO *const mbmi,
int mi_row, int mi_col,
- BLOCK_SIZE bsize, int use_rd,
- int64_t rate_sb) {
+ BLOCK_SIZE bsize,
+ int64_t rate,
+ int64_t dist) {
const VP9_COMMON *const cm = &cpi->common;
CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
const int bw = num_8x8_blocks_wide_lookup[bsize];
const int xmis = MIN(cm->mi_cols - mi_col, bw);
const int ymis = MIN(cm->mi_rows - mi_row, bh);
const int block_index = mi_row * cm->mi_cols + mi_col;
- const int refresh_this_block = candidate_refresh_aq(cr, mbmi, bsize, use_rd,
- rate_sb);
+ const int refresh_this_block = candidate_refresh_aq(cr, mbmi, rate, dist);
// Default is to not update the refresh map.
int new_map_value = cr->map[block_index];
int x = 0; int y = 0;
const double q = vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
vp9_clear_system_state();
cr->max_qdelta_perc = 50;
- cr->min_block_size = BLOCK_8X8;
cr->time_for_refresh = 0;
- // Set rate threshold to some fraction of target (and scaled by 256).
- cr->thresh_rate_sb = (rc->sb64_target_rate * 256) >> 2;
+ // Set the rate threshold to a fraction (currently 1) of the target rate;
+ // the target is given by sb64_target_rate and scaled by 256.
+ cr->thresh_rate_sb = (rc->sb64_target_rate << 8);
// Distortion threshold, quadratic in Q, scale factor to be adjusted.
- cr->thresh_dist_sb = 8 * (int)(q * q);
- if (cpi->sf.use_nonrd_pick_mode) {
- // May want to be more conservative with thresholds in non-rd mode for now
- // as rate/distortion are derived from model based on prediction residual.
- cr->thresh_rate_sb = (rc->sb64_target_rate * 256);
- cr->thresh_dist_sb = 16 * (int)(q * q);
- }
-
+ cr->thresh_dist_sb = (int)(q * q) << 5;
+ cr->motion_thresh = 32;
// Set up segmentation.
// Clear down the segment map.
vp9_enable_segmentation(&cm->seg);
// and segmentation map.
void vp9_cyclic_refresh_update_segment(struct VP9_COMP *const cpi,
MB_MODE_INFO *const mbmi,
- int mi_row, int mi_col,
- BLOCK_SIZE bsize, int use_rd,
- int64_t rate_sb);
+ int mi_row, int mi_col, BLOCK_SIZE bsize,
+ int64_t rate, int64_t dist);
// Update the segmentation map, and related quantities: cyclic refresh map,
// refresh sb_index, and target number of blocks to be refreshed.
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"
-static struct vp9_token intra_mode_encodings[INTRA_MODES];
-static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
-static struct vp9_token partition_encodings[PARTITION_TYPES];
-static struct vp9_token inter_mode_encodings[INTER_MODES];
-
-void vp9_entropy_mode_init() {
- vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
- vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
- vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
- vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
-}
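+// Pre-computed (value, length) token encodings that vp9_entropy_mode_init()
+// used to build at run time from the corresponding probability trees.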
+static const struct vp9_token intra_mode_encodings[INTRA_MODES] = {
+ {0, 1}, {6, 3}, {28, 5}, {30, 5}, {58, 6}, {59, 6}, {126, 7}, {127, 7},
+ {62, 6}, {2, 2}};
+static const struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+ {{0, 1}, {2, 2}, {3, 2}};
+static const struct vp9_token partition_encodings[PARTITION_TYPES] =
+ {{0, 1}, {2, 2}, {6, 3}, {7, 3}};
+static const struct vp9_token inter_mode_encodings[INTER_MODES] =
+ {{2, 2}, {6, 3}, {0, 1}, {7, 3}};
static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
const vp9_prob *probs) {
if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
if (bsize >= BLOCK_8X8) {
write_inter_mode(w, mode, inter_probs);
- ++cpi->td.counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
}
}
const int j = idy * 2 + idx;
const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
write_inter_mode(w, b_mode, inter_probs);
- ++cpi->td.counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
if (b_mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
case ONE_LOOP_REDUCED: {
int updates = 0;
int noupdates_before_first = 0;
-
- if (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8) {
- vp9_write_bit(bc, 0);
- return;
- }
-
for (i = 0; i < PLANE_TYPES; ++i) {
for (j = 0; j < REF_TYPES; ++j) {
for (k = 0; k < COEF_BANDS; ++k) {
}
return;
}
-
default:
assert(0);
}
const TX_MODE tx_mode = cpi->common.tx_mode;
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
- vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];
- vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES];
-
- for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
- build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size],
- frame_coef_probs[tx_size]);
-
- for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size],
- frame_coef_probs[tx_size]);
+ for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
+ vp9_coeff_stats frame_branch_ct[PLANE_TYPES];
+ vp9_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+ if (cpi->td.counts->tx.tx_totals[tx_size] == 0 ||
+ (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
+ vp9_write_bit(w, 0);
+ } else {
+ build_tree_distribution(cpi, tx_size, frame_branch_ct,
+ frame_coef_probs);
+ update_coef_probs_common(w, cpi, tx_size, frame_branch_ct,
+ frame_coef_probs);
+ }
+ }
}
static void encode_loopfilter(struct loopfilter *lf,
vp9_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
}
vp9_wb_write_literal(wb, cm->color_space, 3);
- if (cm->color_space != SRGB) {
+ if (cm->color_space != VPX_CS_SRGB) {
vp9_wb_write_bit(wb, 0); // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
assert(cm->subsampling_x != 1 || cm->subsampling_y != 1);
prob_diff_update(vp9_inter_mode_tree, cm->fc->inter_mode_probs[i],
counts->inter_mode[i], INTER_MODES, &header_bc);
- vp9_zero(counts->inter_mode);
-
if (cm->interp_filter == SWITCHABLE)
update_switchable_interp_probs(cm, &header_bc, counts);
#include "vp9/encoder/vp9_encoder.h"
-void vp9_entropy_mode_init();
-
void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size);
static INLINE int vp9_preserve_existing_gf(VP9_COMP *cpi) {
int16_t *round;
int64_t quant_thred[2];
- // Zbin Over Quant value
- int16_t zbin_extra;
};
/* The [2] dimension is for whether we skip the EOB node (i.e. if previous
vp9_fdct4x4_c(input, output, stride);
} else {
tran_low_t out[4 * 4];
- tran_low_t *outptr = &out[0];
int i, j;
tran_low_t temp_in[4], temp_out[4];
const transform_2d ht = FHT_4[tx_type];
temp_in[0] += 1;
ht.cols(temp_in, temp_out);
for (j = 0; j < 4; ++j)
- outptr[j * 4 + i] = temp_out[j];
+ out[j * 4 + i] = temp_out[j];
}
// Rows
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int eob = -1;
// quantization process is completed.
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
vp9_fdct8x8_c(input, output, stride);
} else {
tran_low_t out[64];
- tran_low_t *outptr = &out[0];
int i, j;
tran_low_t temp_in[8], temp_out[8];
const transform_2d ht = FHT_8[tx_type];
temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
for (j = 0; j < 8; ++j)
- outptr[j * 8 + i] = temp_out[j];
+ out[j * 8 + i] = temp_out[j];
}
// Rows
vp9_fdct16x16_c(input, output, stride);
} else {
tran_low_t out[256];
- tran_low_t *outptr = &out[0];
int i, j;
tran_low_t temp_in[16], temp_out[16];
const transform_2d ht = FHT_16[tx_type];
temp_in[j] = input[j * stride + i] * 4;
ht.cols(temp_in, temp_out);
for (j = 0; j < 16; ++j)
- outptr[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
+ out[j * 16 + i] = (temp_out[j] + 1 + (temp_out[j] < 0)) >> 2;
}
// Rows
}
static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) {
- return (4 << b_width_log2_lookup[bs]) *
- (4 << b_height_log2_lookup[bs]) *
- (increase_denoising ? 60 : 40);
+ return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 60 : 40);
}
static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
noise_motion_thresh(bs, increase_denoising)) {
return 0;
} else {
- return (4 << b_width_log2_lookup[bs]) *
- (4 << b_height_log2_lookup[bs]) * 20;
+ return (1 << num_pels_log2_lookup[bs]) * 20;
}
}
int total_adj_strong_thresh(BLOCK_SIZE bs, int increase_denoising) {
- return (4 << b_width_log2_lookup[bs]) *
- (4 << b_height_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
+ return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) {
- return (4 << b_width_log2_lookup[bs]) *
- (4 << b_height_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
+ return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
}
// TODO(jackychen): If increase_denoising is enabled in the future,
return framebuf + (stride * mi_row * 8) + (mi_col * 8);
}
-static void copy_block(uint8_t *dest, int dest_stride,
- const uint8_t *src, int src_stride, BLOCK_SIZE bs) {
- int r;
- for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
- vpx_memcpy(dest, src, (4 << b_width_log2_lookup[bs]));
- dest += dest_stride;
- src += src_stride;
- }
-}
-
static VP9_DENOISER_DECISION perform_motion_compensation(VP9_DENOISER *denoiser,
MACROBLOCK *mb,
BLOCK_SIZE bs,
MV_REFERENCE_FRAME frame;
MACROBLOCKD *filter_mbd = &mb->e_mbd;
MB_MODE_INFO *mbmi = &filter_mbd->mi[0].src_mi->mbmi;
-
MB_MODE_INFO saved_mbmi;
int i, j;
struct buf_2d saved_dst[MAX_MB_PLANE];
struct buf_2d saved_pre[MAX_MB_PLANE][2]; // 2 pre buffers
- // We will restore these after motion compensation.
- saved_mbmi = *mbmi;
- for (i = 0; i < MAX_MB_PLANE; ++i) {
- for (j = 0; j < 2; ++j) {
- saved_pre[i][j] = filter_mbd->plane[i].pre[j];
- }
- saved_dst[i] = filter_mbd->plane[i].dst;
- }
-
mv_col = ctx->best_sse_mv.as_mv.col;
mv_row = ctx->best_sse_mv.as_mv.row;
-
*motion_magnitude = mv_row * mv_row + mv_col * mv_col;
-
frame = ctx->best_reference_frame;
+ saved_mbmi = *mbmi;
+
// If the best reference frame uses inter-prediction and there is enough of a
// difference in sum-squared-error, use it.
if (frame != INTRA_FRAME &&
ctx->newmv_sse = ctx->zeromv_sse;
}
+ if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) {
+ // Restore everything to its original state
+ *mbmi = saved_mbmi;
+ return COPY_BLOCK;
+ }
+ if (mv_row * mv_row + mv_col * mv_col >
+ 8 * noise_motion_thresh(bs, increase_denoising)) {
+ // Restore everything to its original state
+ *mbmi = saved_mbmi;
+ return COPY_BLOCK;
+ }
+
+ // We will restore these after motion compensation.
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ for (j = 0; j < 2; ++j) {
+ saved_pre[i][j] = filter_mbd->plane[i].pre[j];
+ }
+ saved_dst[i] = filter_mbd->plane[i].dst;
+ }
+
// Set the pointers in the MACROBLOCKD to point to the buffers in the denoiser
// struct.
for (j = 0; j < 2; ++j) {
mv_row = ctx->best_sse_mv.as_mv.row;
mv_col = ctx->best_sse_mv.as_mv.col;
- if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) {
- return COPY_BLOCK;
- }
- if (mv_row * mv_row + mv_col * mv_col >
- 8 * noise_motion_thresh(bs, increase_denoising)) {
- return COPY_BLOCK;
- }
return FILTER_BLOCK;
}
}
if (decision == FILTER_BLOCK) {
- copy_block(src.buf, src.stride, avg_start, avg.y_stride, bs);
+ vp9_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride,
+ NULL, 0, NULL, 0,
+ num_4x4_blocks_wide_lookup[bs] << 2,
+ num_4x4_blocks_high_lookup[bs] << 2);
} else { // COPY_BLOCK
- copy_block(avg_start, avg.y_stride, src.buf, src.stride, bs);
+ vp9_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride,
+ NULL, 0, NULL, 0,
+ num_4x4_blocks_wide_lookup[bs] << 2,
+ num_4x4_blocks_high_lookup[bs] << 2);
}
}
int r;
const uint8_t *srcbuf = src.y_buffer;
uint8_t *destbuf = dest.y_buffer;
+
assert(dest.y_width == src.y_width);
assert(dest.y_height == src.y_height);
}
}
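+// Swap the luma buffer pointers of two frame buffers instead of copying the
+// pixels.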
+static void swap_frame_buffer(YV12_BUFFER_CONFIG *dest,
+ YV12_BUFFER_CONFIG *src) {
+ uint8_t *tmp_buf = dest->y_buffer;
+ assert(dest->y_width == src->y_width);
+ assert(dest->y_height == src->y_height);
+ dest->y_buffer = src->y_buffer;
+ src->y_buffer = tmp_buf;
+}
+
void vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser,
YV12_BUFFER_CONFIG src,
FRAME_TYPE frame_type,
if (frame_type == KEY_FRAME) {
int i;
// Start at 1 so as not to overwrite the INTRA_FRAME
- for (i = 1; i < MAX_REF_FRAMES; ++i) {
+ for (i = 1; i < MAX_REF_FRAMES; ++i)
copy_frame(denoiser->running_avg_y[i], src);
- }
- } else { /* For non key frames */
- if (refresh_alt_ref_frame) {
- copy_frame(denoiser->running_avg_y[ALTREF_FRAME],
- denoiser->running_avg_y[INTRA_FRAME]);
- }
- if (refresh_golden_frame) {
- copy_frame(denoiser->running_avg_y[GOLDEN_FRAME],
- denoiser->running_avg_y[INTRA_FRAME]);
- }
- if (refresh_last_frame) {
- copy_frame(denoiser->running_avg_y[LAST_FRAME],
- denoiser->running_avg_y[INTRA_FRAME]);
- }
+ return;
+ }
+
+ /* For non-key frames */
+ if (refresh_alt_ref_frame) {
+ swap_frame_buffer(&denoiser->running_avg_y[ALTREF_FRAME],
+ &denoiser->running_avg_y[INTRA_FRAME]);
+ }
+ if (refresh_golden_frame) {
+ swap_frame_buffer(&denoiser->running_avg_y[GOLDEN_FRAME],
+ &denoiser->running_avg_y[INTRA_FRAME]);
+ }
+ if (refresh_last_frame) {
+ swap_frame_buffer(&denoiser->running_avg_y[LAST_FRAME],
+ &denoiser->running_avg_y[INTRA_FRAME]);
}
}
ctx->best_zeromv_reference_frame = mbmi->ref_frame[0];
}
- if (mode == NEWMV) {
+ if (mbmi->mv[0].as_int != 0 && sse < ctx->newmv_sse) {
ctx->newmv_sse = sse;
ctx->best_sse_inter_mode = mode;
ctx->best_sse_mv = mbmi->mv[0];
make_grayscale(&denoiser->running_avg_y[i]);
#endif
denoiser->increase_denoising = 0;
+ denoiser->frame_buffer_initialized = 1;
return 0;
}
void vp9_denoiser_free(VP9_DENOISER *denoiser) {
int i;
if (denoiser == NULL) {
return;
}
+ denoiser->frame_buffer_initialized = 0;
for (i = 0; i < MAX_REF_FRAMES; ++i) {
- if (&denoiser->running_avg_y[i] != NULL) {
- vp9_free_frame_buffer(&denoiser->running_avg_y[i]);
- }
- }
- if (&denoiser->mc_running_avg_y != NULL) {
- vp9_free_frame_buffer(&denoiser->mc_running_avg_y);
+ vp9_free_frame_buffer(&denoiser->running_avg_y[i]);
}
+ vp9_free_frame_buffer(&denoiser->mc_running_avg_y);
}
#ifdef OUTPUT_YUV_DENOISED
uint8_t *u = yuv->u_buffer;
uint8_t *v = yuv->v_buffer;
- // The '/2's are there because we have a 440 buffer, but we want to output
- // 420.
- for (r = 0; r < yuv->uv_height / 2; ++r) {
- for (c = 0; c < yuv->uv_width / 2; ++c) {
+ for (r = 0; r < yuv->uv_height; ++r) {
+ for (c = 0; c < yuv->uv_width; ++c) {
u[c] = UINT8_MAX / 2;
v[c] = UINT8_MAX / 2;
}
- u += yuv->uv_stride + yuv->uv_width / 2;
- v += yuv->uv_stride + yuv->uv_width / 2;
+ u += yuv->uv_stride;
+ v += yuv->uv_stride;
}
}
#endif
YV12_BUFFER_CONFIG running_avg_y[MAX_REF_FRAMES];
YV12_BUFFER_CONFIG mc_running_avg_y;
int increase_denoising;
+ int frame_buffer_initialized;
} VP9_DENOISER;
void vp9_denoiser_update_frame_info(VP9_DENOISER *denoiser,
int mi_row, int mi_col, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx);
-// Motion vector component magnitude threshold for defining fast motion.
-#define FAST_MOTION_MV_THRESH 24
-
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
void *data,
BLOCK_SIZE bsize,
int mi_row,
- int mi_col) {
+ int mi_col,
+ int64_t threshold,
+ BLOCK_SIZE bsize_min) {
VP9_COMMON * const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
- // TODO(marpan): Adjust/tune these thresholds.
- const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4;
- int64_t threshold =
- (int64_t)(threshold_multiplier *
- vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth));
- int64_t threshold_bsize_ref = threshold << 6;
- int64_t threshold_low = threshold;
- BLOCK_SIZE bsize_ref = BLOCK_16X16;
assert(block_height == block_width);
tree_to_node(data, bsize, &vt);
- if (cm->frame_type == KEY_FRAME) {
- bsize_ref = BLOCK_8X8;
- // Choose lower thresholds for key frame variance to favor split, but keep
- // threshold for splitting to 4x4 block still fairly high for now.
- threshold_bsize_ref = threshold << 2;
- threshold_low = threshold >> 2;
- }
-
- // For bsize=bsize_ref (16x16/8x8 for 8x8/4x4 downsampling), select if
+ // For bsize=bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select if
// variance is below threshold, otherwise split will be selected.
// No check for vert/horiz split as too few samples for variance.
- if (bsize == bsize_ref) {
+ if (bsize == bsize_min) {
get_variance(&vt.part_variances->none);
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
- vt.part_variances->none.variance < threshold_bsize_ref) {
+ vt.part_variances->none.variance < threshold) {
set_block_size(cpi, xd, mi_row, mi_col, bsize);
return 1;
}
return 0;
- } else if (bsize > bsize_ref) {
+ } else if (bsize > bsize_min) {
get_variance(&vt.part_variances->none);
- // For key frame, for bsize above 32X32, or very high variance, take split.
+ // For key frame or low_res: for bsize above 32X32 or very high variance,
+ // take split.
if (cm->frame_type == KEY_FRAME &&
(bsize > BLOCK_32X32 ||
- vt.part_variances->none.variance > (threshold << 2))) {
+ vt.part_variances->none.variance > (threshold << 4))) {
return 0;
}
// If variance is low, take the bsize (no split).
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
- vt.part_variances->none.variance < threshold_low) {
+ vt.part_variances->none.variance < threshold) {
set_block_size(cpi, xd, mi_row, mi_col, bsize);
return 1;
}
if (mi_row + block_height / 2 < cm->mi_rows) {
get_variance(&vt.part_variances->vert[0]);
get_variance(&vt.part_variances->vert[1]);
- if (vt.part_variances->vert[0].variance < threshold_low &&
- vt.part_variances->vert[1].variance < threshold_low) {
+ if (vt.part_variances->vert[0].variance < threshold &&
+ vt.part_variances->vert[1].variance < threshold) {
BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
set_block_size(cpi, xd, mi_row, mi_col, subsize);
set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
if (mi_col + block_width / 2 < cm->mi_cols) {
get_variance(&vt.part_variances->horz[0]);
get_variance(&vt.part_variances->horz[1]);
- if (vt.part_variances->horz[0].variance < threshold_low &&
- vt.part_variances->horz[1].variance < threshold_low) {
+ if (vt.part_variances->horz[0].variance < threshold &&
+ vt.part_variances->horz[1].variance < threshold) {
BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
set_block_size(cpi, xd, mi_row, mi_col, subsize);
set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
}
// This function chooses partitioning based on the variance between source and
-// reconstructed last, where variance is computed for downsampled inputs.
-// Currently 8x8 downsampling is used for delta frames, 4x4 for key frames.
+// reconstructed last, where variance is computed for down-sampled inputs.
static void choose_partitioning(VP9_COMP *cpi,
const TileInfo *const tile,
MACROBLOCK *x,
int i, j, k, m;
v64x64 vt;
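+ // Extra per-16x16 variance trees, used when an inter-frame 16x16 block is
+ // re-evaluated with 4x4 down-sampling (key frames reuse vt.split directly).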
+ v16x16 vt2[16];
uint8_t *s;
const uint8_t *d;
int sp;
int pixels_wide = 64, pixels_high = 64;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
+ // Always use 4x4 partition for key frame.
+ int use_4x4_partition = (cm->frame_type == KEY_FRAME);
+ int variance4x4downsample[16];
+ int low_res = (cm->width <= 352 && cm->height <= 288) ? 1 : 0;
+ const int threshold_multiplier = cm->frame_type == KEY_FRAME ? 80 : 4;
+ int64_t threshold_base;
+ int64_t threshold;
+ int64_t threshold_bsize_min;
+ int64_t threshold_bsize_max;
vp9_clear_system_state();
+ threshold_base = (int64_t)(threshold_multiplier *
+ vp9_convert_qindex_to_q(cm->base_qindex, cm->bit_depth));
+ threshold = threshold_base;
+ threshold_bsize_min = threshold_base << 6;
+ threshold_bsize_max = threshold_base;
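+ // threshold_bsize_max is applied at 64x64, threshold at the intermediate
+ // sizes, and threshold_bsize_min at the smallest partition size checked.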
+
+ // Modify thresholds for key frames and for low resolutions (use lower
+ // thresholds to favor splitting).
+ if (cm->frame_type == KEY_FRAME) {
+ threshold = threshold_base >> 2;
+ threshold_bsize_min = threshold_base << 2;
+ } else if (low_res) {
+ threshold_bsize_min = threshold_base << 3;
+ threshold_bsize_max = threshold_base >> 2;
+ }
+
set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);
if (xd->mb_to_right_edge < 0)
#endif // CONFIG_VP9_HIGHBITDEPTH
}
- // Fill in the entire tree of 8x8 variances for splits.
+ // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
+ // for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
+ const int i2 = i << 2;
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
v16x16 *vst = &vt.split[i].split[j];
- for (k = 0; k < 4; k++) {
- int x8_idx = x16_idx + ((k & 1) << 3);
- int y8_idx = y16_idx + ((k >> 1) << 3);
- if (cm->frame_type != KEY_FRAME) {
- unsigned int sse = 0;
- int sum = 0;
- if (x8_idx < pixels_wide && y8_idx < pixels_high) {
- int s_avg, d_avg;
+ variance4x4downsample[i2 + j] = 0;
+ if (cm->frame_type != KEY_FRAME) {
+ for (k = 0; k < 4; k++) {
+ int x8_idx = x16_idx + ((k & 1) << 3);
+ int y8_idx = y16_idx + ((k >> 1) << 3);
+ unsigned int sse = 0;
+ int sum = 0;
+ if (x8_idx < pixels_wide && y8_idx < pixels_high) {
+ int s_avg, d_avg;
#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
- } else {
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
+ d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ } else {
+ s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
+ d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
+ }
+#else
s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
- }
-#else
- s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
- d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
- sum = s_avg - d_avg;
- sse = sum * sum;
- }
- // If variance is based on 8x8 downsampling, we stop here and have
- // one sample for 8x8 block (so use 1 for count in fill_variance),
- // which of course means variance = 0 for 8x8 block.
- fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
- } else {
- // For key frame, go down to 4x4.
- v8x8 *vst2 = &vst->split[k];
+ sum = s_avg - d_avg;
+ sse = sum * sum;
+ }
+ // If variance is based on 8x8 downsampling, we stop here and have
+ // one sample for 8x8 block (so use 1 for count in fill_variance),
+ // which of course means variance = 0 for 8x8 block.
+ fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
+ }
+ fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
+ // For low resolutions, compute the variance based on 8x8 down-sampling;
+ // if it is large (above the threshold), go down to 4x4 down-sampling.
+ // For key frames, always go down to 4x4.
+ if (low_res)
+ get_variance(&vt.split[i].split[j].part_variances.none);
+ }
+ if (cm->frame_type == KEY_FRAME || (low_res &&
+ vt.split[i].split[j].part_variances.none.variance >
+ (threshold << 1))) {
+ // Go down to 4x4 down-sampling for variance.
+ variance4x4downsample[i2 + j] = 1;
+ for (k = 0; k < 4; k++) {
+ int x8_idx = x16_idx + ((k & 1) << 3);
+ int y8_idx = y16_idx + ((k >> 1) << 3);
+ v8x8 *vst2 = (cm->frame_type == KEY_FRAME) ? &vst->split[k] :
+ &vt2[i2 + j].split[k];
for (m = 0; m < 4; m++) {
int x4_idx = x8_idx + ((m & 1) << 2);
int y4_idx = y8_idx + ((m >> 1) << 2);
unsigned int sse = 0;
int sum = 0;
if (x4_idx < pixels_wide && y4_idx < pixels_high) {
+ int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
int s_avg;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+ if (cm->frame_type != KEY_FRAME)
+ d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
} else {
s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+ if (cm->frame_type != KEY_FRAME)
+ d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
}
#else
int s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
+ if (cm->frame_type != KEY_FRAME)
+ d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
- // For key frame, reference is set to 128.
- sum = s_avg - 128;
+ sum = s_avg - d_avg;
sse = sum * sum;
}
- // If variance is based on 4x4 downsampling, we stop here and have
+ // If variance is based on 4x4 down-sampling, we stop here and have
// one sample for 4x4 block (so use 1 for count in fill_variance),
// which of course means variance = 0 for 4x4 block.
- fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
+ fill_variance(sse, sum, 0, &vst2->split[m].part_variances.none);
}
}
}
}
}
+
// Fill the rest of the variance tree by summing split partition values.
for (i = 0; i < 4; i++) {
+ const int i2 = i << 2;
for (j = 0; j < 4; j++) {
- if (cm->frame_type == KEY_FRAME) {
+ if (variance4x4downsample[i2 + j] == 1) {
+ v16x16 *vtemp = (cm->frame_type != KEY_FRAME) ? &vt2[i2 + j] :
+ &vt.split[i].split[j];
for (m = 0; m < 4; m++) {
- fill_variance_tree(&vt.split[i].split[j].split[m], BLOCK_8X8);
+ fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
}
+ fill_variance_tree(vtemp, BLOCK_16X16);
}
- fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
}
fill_variance_tree(&vt.split[i], BLOCK_32X32);
}
fill_variance_tree(&vt, BLOCK_64X64);
+
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold.
if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
- !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col)) {
+ !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
+ threshold_bsize_max, BLOCK_16X16)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
+ const int i2 = i << 2;
if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
- (mi_row + y32_idx), (mi_col + x32_idx))) {
+ (mi_row + y32_idx), (mi_col + x32_idx),
+ threshold, BLOCK_16X16)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
- // Note: If 8x8 downsampling is used for variance calculation we
- // cannot really select block size 8x8 (or even 8x16/16x8), since we
- // don't have sufficient samples for variance. So on delta frames,
- // 8x8 partition is only set if variance of the 16x16 block is very
- // high. For key frames, 4x4 downsampling is used, so we can better
- // select 8x16/16x8 and 8x8. 4x4 partition can potentially be set
- // used here too, but for now 4x4 is not allowed.
- if (!set_vt_partitioning(cpi, xd, &vt.split[i].split[j],
- BLOCK_16X16,
+ // TODO(marpan): Allow 4x4 partitions for inter-frames.
+ // use_4x4_partition = (variance4x4downsample[i2 + j] == 1);
+ // If 4x4 partition is not used, then 8x8 partition will be selected
+ // if variance of 16x16 block is very high, so use larger threshold
+ // for 16x16 (threshold_bsize_min) in that case.
+ uint64_t threshold_16x16 = (use_4x4_partition) ? threshold :
+ threshold_bsize_min;
+ BLOCK_SIZE bsize_min = (use_4x4_partition) ? BLOCK_8X8 : BLOCK_16X16;
+ // For inter frames: if variance4x4downsample[] == 1 for this 16x16
+ // block, then the variance is based on 4x4 down-sampling, so use vt2
+ // in set_vt_partitioning(), otherwise use vt.
+ v16x16 *vtemp = (cm->frame_type != KEY_FRAME &&
+ variance4x4downsample[i2 + j] == 1) ?
+ &vt2[i2 + j] : &vt.split[i].split[j];
+ if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
mi_row + y32_idx + y16_idx,
- mi_col + x32_idx + x16_idx)) {
+ mi_col + x32_idx + x16_idx,
+ threshold_16x16, bsize_min)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
- if (cm->frame_type == KEY_FRAME) {
- if (!set_vt_partitioning(cpi, xd,
- &vt.split[i].split[j].split[k],
+ if (use_4x4_partition) {
+ if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
BLOCK_8X8,
mi_row + y32_idx + y16_idx + y8_idx,
- mi_col + x32_idx + x16_idx + x8_idx)) {
- set_block_size(cpi, xd,
- (mi_row + y32_idx + y16_idx + y8_idx),
- (mi_col + x32_idx + x16_idx + x8_idx),
- BLOCK_4X4);
+ mi_col + x32_idx + x16_idx + x8_idx,
+ threshold_bsize_min, BLOCK_8X8)) {
+ set_block_size(cpi, xd,
+ (mi_row + y32_idx + y16_idx + y8_idx),
+ (mi_col + x32_idx + x16_idx + x8_idx),
+ BLOCK_4X4);
}
} else {
set_block_size(cpi, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
BLOCK_8X8);
- }
+ }
}
}
}
// Else for cyclic refresh mode update the segment map, set the segment id
// and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
- vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi,
- mi_row, mi_col, bsize, 1, ctx->rate);
+ vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0].src_mi->mbmi, mi_row,
+ mi_col, bsize, ctx->rate, ctx->dist);
}
}
const MACROBLOCKD *const xd = &x->e_mbd;
const MODE_INFO *const mi = xd->mi[0].src_mi;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
+ const BLOCK_SIZE bsize = mbmi->sb_type;
if (!frame_is_intra_only(cm)) {
+ FRAME_COUNTS *const counts = td->counts;
+ const int inter_block = is_inter_block(mbmi);
const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
SEG_LVL_REF_FRAME);
if (!seg_ref_active) {
- FRAME_COUNTS *const counts = td->counts;
- const int inter_block = is_inter_block(mbmi);
-
counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
-
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
if (inter_block) {
const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
-
if (cm->reference_mode == REFERENCE_MODE_SELECT)
counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
[has_second_ref(mbmi)]++;
}
}
}
+ if (inter_block &&
+ !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
+ if (bsize >= BLOCK_8X8) {
+ const PREDICTION_MODE mode = mbmi->mode;
+ ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
+ } else {
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy;
+ for (idy = 0; idy < 2; idy += num_4x4_h) {
+ for (idx = 0; idx < 2; idx += num_4x4_w) {
+ const int j = idy * 2 + idx;
+ const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
+ ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
+ }
+ }
+ }
+ }
}
}
: cm->last_frame_seg_map;
mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
} else {
- // Setting segmentation map for cyclic_refresh
- vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1,
- ctx->rate);
+ // Setting segmentation map for cyclic_refresh.
+ vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize,
+ ctx->rate, ctx->dist);
}
vp9_init_plane_quantizers(cpi, x);
}
const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter];
}
+
+ if (mbmi->sb_type < BLOCK_8X8) {
+ mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
+ mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
+ }
}
if (cm->use_prev_frame_mvs) {
hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
- else
+ else if (bsize >= BLOCK_8X8)
vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
rd_cost, bsize, ctx);
+ else
+ vp9_pick_inter_mode_sub8x8(cpi, x, tile_data, mi_row, mi_col,
+ rd_cost, bsize, ctx);
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
pc_tree->vertical[0].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
subsize, &pc_tree->vertical[0]);
- if (mi_col + hbs < cm->mi_cols) {
+ if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
pc_tree->vertical[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
dummy_cost, subsize, &pc_tree->vertical[1]);
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
subsize, &pc_tree->horizontal[0]);
- if (mi_row + hbs < cm->mi_rows) {
+ if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
pc_tree->horizontal[1].pred_pixel_ready = 1;
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
dummy_cost, subsize, &pc_tree->horizontal[1]);
// Set the partition type of the 64X64 block
switch (sf->partition_search_type) {
case VAR_BASED_PARTITION:
- // TODO(jingning) Only key frame coding supports sub8x8 block at this
- // point. To be continued to enable sub8x8 block mode decision for
- // P frames.
+ // TODO(jingning, marpan): The mode decision and encoding process
+ // support both intra and inter sub8x8 block coding for RTC mode.
+ // Tune the thresholds so that sub8x8 block coding is used where it
+ // improves coding performance.
choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
}
}
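+ // Reset the transform-size usage counters.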
+ vpx_memset(cpi->td.counts->tx.tx_totals, 0,
+ sizeof(cpi->td.counts->tx.tx_totals));
+
if (cpi->sf.frame_parameter_update) {
int i;
count16x16_lp += counts->tx.p32x32[i][TX_16X16];
count32x32 += counts->tx.p32x32[i][TX_32X32];
}
-
if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
count32x32 == 0) {
cm->tx_mode = ALLOW_8X8;
set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
- vp9_update_zbin_extra(x);
-
if (!is_inter_block(mbmi)) {
int plane;
mbmi->skip = 1;
if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
mi_8x8[mis * y + x].src_mi->mbmi.tx_size = tx_size;
}
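+ // Count how often each transform size is used (luma and chroma); the
+ // bitstream writer skips coefficient probability updates for sizes with a
+ // zero count.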
+ ++td->counts->tx.tx_totals[mbmi->tx_size];
+ ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
}
}
int rate;
int error;
int next;
- signed char token;
+ int16_t token;
short qc;
} vp9_token_state;
int next = eob, sz = 0;
int64_t rdmult = mb->rdmult * plane_rd_mult[type], rddiv = mb->rddiv;
int64_t rd_cost0, rd_cost1;
- int rate0, rate1, error0, error1, t0, t1;
+ int rate0, rate1, error0, error1;
+ int16_t t0, t1;
+ EXTRABIT e0;
int best, band, pt, i, final_eob;
- const TOKENVALUE *dct_value_tokens;
- const int16_t *dct_value_cost;
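+ // cat6_high_cost: extra-bit costs for large-magnitude (category 6) tokens
+ // at the given bit depth.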
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
+#else
+ const int16_t *cat6_high_cost = vp9_get_high_cost_table(8);
+#endif
assert((!type && !plane) || (type && plane));
assert(eob <= default_eob);
tokens[eob][0].qc = 0;
tokens[eob][1] = tokens[eob][0];
-#if CONFIG_VP9_HIGHBITDEPTH
- if (xd->bd == 12) {
- dct_value_tokens = vp9_dct_value_tokens_high12_ptr;
- dct_value_cost = vp9_dct_value_cost_high12_ptr;
- } else if (xd->bd == 10) {
- dct_value_tokens = vp9_dct_value_tokens_high10_ptr;
- dct_value_cost = vp9_dct_value_cost_high10_ptr;
- } else {
- dct_value_tokens = vp9_dct_value_tokens_ptr;
- dct_value_cost = vp9_dct_value_cost_ptr;
- }
-#else
- dct_value_tokens = vp9_dct_value_tokens_ptr;
- dct_value_cost = vp9_dct_value_cost_ptr;
-#endif
for (i = 0; i < eob; i++)
token_cache[scan[i]] =
- vp9_pt_energy_class[dct_value_tokens[qcoeff[scan[i]]].token];
+ vp9_pt_energy_class[vp9_get_token(qcoeff[scan[i]])];
for (i = eob; i-- > 0;) {
int base_bits, d2, dx;
/* Evaluate the first possibility for this state. */
rate0 = tokens[next][0].rate;
rate1 = tokens[next][1].rate;
- t0 = (dct_value_tokens + x)->token;
+ vp9_get_token_extra(x, &t0, &e0);
/* Consider both possible successor states. */
if (next < default_eob) {
band = band_translate[i + 1];
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = dct_value_cost[x];
+ base_bits = vp9_get_cost(t0, e0, cat6_high_cost);
dx = mul * (dqcoeff[rc] - coeff[rc]);
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
*/
t0 = tokens[next][0].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
+ e0 = 0;
} else {
- t0 = t1 = (dct_value_tokens + x)->token;
+ vp9_get_token_extra(x, &t0, &e0);
+ t1 = t0;
}
if (next < default_eob) {
band = band_translate[i + 1];
UPDATE_RD_COST();
/* And pick the best. */
best = rd_cost1 < rd_cost0;
- base_bits = dct_value_cost[x];
+ base_bits = vp9_get_cost(t0, e0, cat6_high_cost);
if (shortcut) {
#if CONFIG_VP9_HIGHBITDEPTH
vp9_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round_fp, p->quant_fp, p->quant_shift,
qcoeff, dqcoeff, pd->dequant,
- p->zbin_extra, eob, scan_order->scan,
+ eob, scan_order->scan,
scan_order->iscan);
break;
case TX_16X16:
vp9_highbd_fdct16x16(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vp9_highbd_fdct8x8(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
default:
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
break;
case TX_16X16:
vp9_fdct16x16(src_diff, coeff, diff_stride);
vp9_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vp9_fdct8x8_quant(src_diff, diff_stride, coeff, 64,
x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
default:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift, qcoeff,
- dqcoeff, pd->dequant, p->zbin_extra, eob,
+ dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_16X16:
vp9_highbd_fdct16x16(src_diff, coeff, diff_stride);
vp9_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vp9_highbd_fdct8x8(src_diff, coeff, diff_stride);
vp9_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
default:
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
break;
case TX_16X16:
vp9_fdct16x16(src_diff, coeff, diff_stride);
vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_8X8:
vp9_fdct8x8(src_diff, coeff, diff_stride);
vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
case TX_4X4:
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
break;
default:
highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_highbd_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin,
p->round, p->quant, p->quant_shift,
- qcoeff, dqcoeff, pd->dequant,
- p->zbin_extra, eob,
+ qcoeff, dqcoeff, pd->dequant, eob,
scan_order->scan, scan_order->iscan);
}
if (!x->skip_encode && *eob) {
vp9_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
vp9_highbd_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
}
if (!x->skip_encode && *eob) {
vp9_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
vp9_highbd_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
}
if (!x->skip_encode && *eob) {
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob,
+ pd->dequant, eob,
scan_order->scan, scan_order->iscan);
}
fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
vp9_quantize_b_32x32(coeff, 1024, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
}
if (!x->skip_encode && *eob)
vp9_fht16x16(src_diff, coeff, diff_stride, tx_type);
vp9_quantize_b(coeff, 256, x->skip_block, p->zbin, p->round,
p->quant, p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
}
if (!x->skip_encode && *eob)
vp9_fht8x8(src_diff, coeff, diff_stride, tx_type);
vp9_quantize_b(coeff, 64, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
}
if (!x->skip_encode && *eob)
x->fwd_txm4x4(src_diff, coeff, diff_stride);
vp9_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
p->quant_shift, qcoeff, dqcoeff,
- pd->dequant, p->zbin_extra, eob, scan_order->scan,
+ pd->dequant, eob, scan_order->scan,
scan_order->iscan);
}
#include "vp9/encoder/vp9_resize.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
-void vp9_coef_tree_initialize();
#define SHARP_FILTER_QTHRESH 0 /* Q threshold for 8-tap sharp filter */
cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
}
-void vp9_initialize_enc() {
- static int init_done = 0;
+void vp9_initialize_enc(void) {
+ static volatile int init_done = 0;
if (!init_done) {
vp9_rtcd();
vp9_init_intra_predictors();
- vp9_coef_tree_initialize();
- vp9_tokenize_initialize();
vp9_init_me_luts();
vp9_rc_init_minq_luts();
vp9_entropy_mv_init();
- vp9_entropy_mode_init();
vp9_temporal_filter_init();
init_done = 1;
}
  // Delete segmentation map
vpx_free(cpi->segmentation_map);
cpi->segmentation_map = NULL;
- vpx_free(cm->last_frame_seg_map);
- cm->last_frame_seg_map = NULL;
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
VP9_COMMON *cm = &cpi->common;
const VP9EncoderConfig *oxcf = &cpi->oxcf;
- cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
- cm->subsampling_x, cm->subsampling_y,
+ if (!cpi->lookahead)
+ cpi->lookahead = vp9_lookahead_init(oxcf->width, oxcf->height,
+ cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth = oxcf->use_highbitdepth;
#endif
- cm->color_space = UNKNOWN;
+ cm->color_space = oxcf->color_space;
cm->width = oxcf->width;
cm->height = oxcf->height;
if (cm->profile != oxcf->profile)
cm->profile = oxcf->profile;
cm->bit_depth = oxcf->bit_depth;
+ cm->color_space = oxcf->color_space;
if (cm->profile <= PROFILE_1)
assert(cm->bit_depth == VPX_BITS_8);
cm->display_width = cpi->oxcf.width;
cm->display_height = cpi->oxcf.height;
+ cm->width = cpi->oxcf.width;
+ cm->height = cpi->oxcf.height;
if (cpi->initial_width) {
- // Increasing the size of the frame beyond the first seen frame, or some
- // otherwise signaled maximum size, is not supported.
- // TODO(jkoleszar): exit gracefully.
- assert(cm->width <= cpi->initial_width);
- assert(cm->height <= cpi->initial_height);
+ if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
+ vp9_free_context_buffers(cm);
+ vp9_alloc_context_buffers(cm, cm->width, cm->height);
+ cpi->initial_width = cpi->initial_height = 0;
+ }
}
update_frame_size(cpi);
#if CONFIG_VP9_HIGHBITDEPTH
highbd_set_var_fns(cpi);
#endif
-
-#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0) {
- vp9_denoiser_alloc(&(cpi->denoiser), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VP9_HIGHBITDEPTH
- cm->use_highbitdepth,
-#endif
- VP9_ENC_BORDER_IN_PIXELS);
- }
-#endif
}
#ifndef M_LOG2_E
}
-VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
+VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
+ BufferPool *const pool) {
unsigned int i;
- VP9_COMP *const cpi = vpx_memalign(32, sizeof(VP9_COMP));
- VP9_COMMON *const cm = cpi != NULL ? &cpi->common : NULL;
+ VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
+ VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
if (!cm)
return NULL;
sizeof(*cm->frame_contexts)));
cpi->use_svc = 0;
+ cpi->common.buffer_pool = pool;
init_config(cpi, oxcf);
vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
}
#if CONFIG_VP9_TEMPORAL_DENOISING
- if (cpi->oxcf.noise_sensitivity > 0) {
- vp9_denoiser_free(&(cpi->denoiser));
- }
+ vp9_denoiser_free(&(cpi->denoiser));
#endif
for (t = 0; t < cpi->num_workers; ++t) {
VP9Worker *const worker = &cpi->workers[t];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *const thread_data = &cpi->tile_thr_data[t];
// Deallocate allocated threads.
vp9_get_worker_interface()->end(worker);
vp9_free_pc_tree(thread_data->td);
vpx_free(thread_data->td);
}
-
- vpx_free(worker->data1);
}
+ vpx_free(cpi->tile_thr_data);
vpx_free(cpi->workers);
+ if (cpi->num_workers > 1)
+ vp9_loop_filter_dealloc(&cpi->lf_row_sync);
+
dealloc_compressor_data(cpi);
for (i = 0; i < sizeof(cpi->mbgraph_stats) /
} while (--h);
src = s->u_buffer;
- h = s->uv_height / 2;
+ h = s->uv_height;
do {
- fwrite(src, s->uv_width / 2, 1, f);
- src += s->uv_stride + s->uv_width / 2;
+ fwrite(src, s->uv_width, 1, f);
+ src += s->uv_stride;
} while (--h);
src = s->v_buffer;
- h = s->uv_height / 2;
+ h = s->uv_height;
do {
- fwrite(src, s->uv_width / 2, 1, f);
- src += s->uv_stride + s->uv_width / 2;
+ fwrite(src, s->uv_width, 1, f);
+ src += s->uv_stride;
} while (--h);
}
#endif
void vp9_update_reference_frames(VP9_COMP *cpi) {
VP9_COMMON * const cm = &cpi->common;
+ BufferPool *const pool = cm->buffer_pool;
// At this point the new frame has been encoded.
// If any buffer copy / swapping is signaled it should be done here.
if (cm->frame_type == KEY_FRAME) {
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
} else if (vp9_preserve_existing_gf(cpi)) {
// We have decided to preserve the previously existing golden frame as our
// slot and, if we're updating the GF, the current frame becomes the new GF.
int tmp;
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
tmp = cpi->alt_fb_idx;
arf_idx = gf_group->arf_update_idx[gf_group->index];
}
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[arf_idx], cm->new_fb_idx);
vpx_memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
cpi->interp_filter_selected[0],
}
if (cpi->refresh_golden_frame) {
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
}
if (cpi->refresh_last_frame) {
- ref_cnt_fb(cm->frame_bufs,
+ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
if (!cpi->rc.is_src_frame_alt_ref)
vpx_memcpy(cpi->interp_filter_selected[LAST_FRAME],
}
if (lf->filter_level > 0) {
- vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+ if (cpi->num_workers > 1)
+ vp9_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+ lf->filter_level, 0, 0,
+ cpi->workers, cpi->num_workers,
+ &cpi->lf_row_sync);
+ else
+ vp9_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
}
vp9_extend_frame_inner_borders(cm->frame_to_show);
// Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1).
if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
- const YV12_BUFFER_CONFIG *const ref = &cm->frame_bufs[idx].buf;
+ BufferPool *const pool = cm->buffer_pool;
+ const YV12_BUFFER_CONFIG *const ref = &pool->frame_bufs[idx].buf;
#if CONFIG_VP9_HIGHBITDEPTH
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
const int new_fb = get_free_fb(cm);
- cm->cur_frame = &cm->frame_bufs[new_fb];
- vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
+ cm->cur_frame = &pool->frame_bufs[new_fb];
+ vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
cm->use_highbitdepth,
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL);
- scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf,
+ scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf,
(int)cm->bit_depth);
#else
if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
const int new_fb = get_free_fb(cm);
- vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
+ vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
NULL, NULL, NULL);
- scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf);
+ scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf);
#endif // CONFIG_VP9_HIGHBITDEPTH
cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
- if (cm->frame_bufs[new_fb].mvs == NULL ||
- cm->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
- cm->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
- vpx_free(cm->frame_bufs[new_fb].mvs);
- cm->frame_bufs[new_fb].mvs =
+ if (pool->frame_bufs[new_fb].mvs == NULL ||
+ pool->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
+ pool->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
+ vpx_free(pool->frame_bufs[new_fb].mvs);
+ pool->frame_bufs[new_fb].mvs =
(MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->frame_bufs[new_fb].mvs));
- cm->frame_bufs[new_fb].mi_rows = cm->mi_rows;
- cm->frame_bufs[new_fb].mi_cols = cm->mi_cols;
+ sizeof(*pool->frame_bufs[new_fb].mvs));
+ pool->frame_bufs[new_fb].mi_rows = cm->mi_rows;
+ pool->frame_bufs[new_fb].mi_cols = cm->mi_cols;
}
} else {
cpi->scaled_ref_idx[ref_frame - 1] = idx;
- ++cm->frame_bufs[idx].ref_count;
+ ++pool->frame_bufs[idx].ref_count;
}
} else {
cpi->scaled_ref_idx[ref_frame - 1] = INVALID_REF_BUFFER_IDX;
int i;
for (i = 0; i < MAX_REF_FRAMES; ++i) {
const int idx = cpi->scaled_ref_idx[i];
- RefCntBuffer *const buf =
- idx != INVALID_REF_BUFFER_IDX ? &cm->frame_bufs[idx] : NULL;
+ RefCntBuffer *const buf = idx != INVALID_REF_BUFFER_IDX ?
+ &cm->buffer_pool->frame_bufs[idx] : NULL;
if (buf != NULL) {
--buf->ref_count;
cpi->scaled_ref_idx[i] = INVALID_REF_BUFFER_IDX;
static void output_frame_level_debug_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
- int recon_err;
+ int64_t recon_err;
vp9_clear_system_state();
"%7.2lf %7.2lf %7.2lf %7.2lf %7.2lf"
"%6d %6d %5d %5d %5d "
"%10"PRId64" %10.3lf"
- "%10lf %8u %10d %10d %10d\n",
+ "%10lf %8u %10"PRId64" %10d %10d\n",
cpi->common.current_video_frame, cpi->rc.this_frame_target,
cpi->rc.projected_frame_size,
cpi->rc.projected_frame_size / cpi->common.MBs,
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
- YV12_BUFFER_CONFIG *const buf = &cm->frame_bufs[idx].buf;
+ YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[idx].buf;
RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1];
ref_buf->buf = buf;
ref_buf->idx = idx;
rc->this_key_frame_forced &&
(rc->projected_frame_size < rc->max_frame_bandwidth)) {
int last_q = q;
- int kf_err;
+ int64_t kf_err;
- int high_err_target = cpi->ambient_err;
- int low_err_target = cpi->ambient_err >> 1;
+ int64_t high_err_target = cpi->ambient_err;
+ int64_t low_err_target = cpi->ambient_err >> 1;
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- kf_err = vp9_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm),
- cm->bit_depth);
+ kf_err = vp9_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
} else {
kf_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
q_high = q > q_low ? q - 1 : q_low;
// Adjust Q
- q = (q * high_err_target) / kf_err;
+ q = (int)((q * high_err_target) / kf_err);
q = MIN(q, (q_high + q_low) >> 1);
} else if (kf_err < low_err_target &&
rc->projected_frame_size >= frame_under_shoot_limit) {
q_low = q < q_high ? q + 1 : q_high;
// Adjust Q
- q = (q * low_err_target) / kf_err;
+ q = (int)((q * low_err_target) / kf_err);
q = MIN(q, (q_high + q_low + 1) >> 1);
}
if (undershoot_seen || loop_count > 1) {
// Update rate_correction_factor unless
- vp9_rc_update_rate_correction_factors(cpi, 1);
+ vp9_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low + 1) / 2;
} else {
// Update rate_correction_factor unless
- vp9_rc_update_rate_correction_factors(cpi, 0);
+ vp9_rc_update_rate_correction_factors(cpi);
q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
bottom_index, MAX(q_high, top_index));
while (q < q_low && retries < 10) {
- vp9_rc_update_rate_correction_factors(cpi, 0);
+ vp9_rc_update_rate_correction_factors(cpi);
q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
bottom_index, MAX(q_high, top_index));
retries++;
q_high = q > q_low ? q - 1 : q_low;
if (overshoot_seen || loop_count > 1) {
- vp9_rc_update_rate_correction_factors(cpi, 1);
+ vp9_rc_update_rate_correction_factors(cpi);
q = (q_high + q_low) / 2;
} else {
- vp9_rc_update_rate_correction_factors(cpi, 0);
+ vp9_rc_update_rate_correction_factors(cpi);
q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
bottom_index, top_index);
// Special case reset for qlow for constrained quality.
}
while (q > q_high && retries < 10) {
- vp9_rc_update_rate_correction_factors(cpi, 0);
+ vp9_rc_update_rate_correction_factors(cpi);
q = vp9_rc_regulate_q(cpi, rc->this_frame_target,
bottom_index, top_index);
retries++;
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
cpi->ambient_err = vp9_highbd_get_y_sse(cpi->Source,
- get_frame_new_buffer(cm),
- cm->bit_depth);
+ get_frame_new_buffer(cm));
} else {
cpi->ambient_err = vp9_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
}
}
}
+#if CONFIG_VP9_TEMPORAL_DENOISING
+static void setup_denoiser_buffer(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ if (cpi->oxcf.noise_sensitivity > 0 &&
+ !cpi->denoiser.frame_buffer_initialized) {
+ vp9_denoiser_alloc(&(cpi->denoiser), cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_ENC_BORDER_IN_PIXELS);
+ }
+}
+#endif
int vp9_receive_raw_frame(VP9_COMP *cpi, unsigned int frame_flags,
YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
check_initial_width(cpi, subsampling_x, subsampling_y);
#endif // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_VP9_TEMPORAL_DENOISING
+ setup_denoiser_buffer(cpi);
+#endif
vpx_usec_timer_start(&timer);
- if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time, frame_flags))
+ if (vp9_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_VP9_HIGHBITDEPTH
+ use_highbitdepth,
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ frame_flags))
res = -1;
vpx_usec_timer_mark(&timer);
cpi->time_receive_data += vpx_usec_timer_elapsed(&timer);
if ((cm->profile == PROFILE_0 || cm->profile == PROFILE_2) &&
(subsampling_x != 1 || subsampling_y != 1)) {
vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
- "Non-4:2:0 color space requires profile 1 or 3");
+ "Non-4:2:0 color format requires profile 1 or 3");
res = -1;
}
if ((cm->profile == PROFILE_1 || cm->profile == PROFILE_3) &&
(subsampling_x == 1 && subsampling_y == 1)) {
vpx_internal_error(&cm->error, VPX_CODEC_INVALID_PARAM,
- "4:2:0 color space requires profile 0 or 2");
+ "4:2:0 color format requires profile 0 or 2");
res = -1;
}
int64_t *time_stamp, int64_t *time_end, int flush) {
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
VP9_COMMON *const cm = &cpi->common;
+ BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
// Find a free buffer for the new frame, releasing the reference previously
// held.
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
+ pool->frame_bufs[cm->new_fb_idx].ref_count--;
cm->new_fb_idx = get_free_fb(cm);
- cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
+ cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
if (!cpi->use_svc && cpi->multi_arf_allowed) {
if (cm->frame_type == KEY_FRAME) {
check_initial_width(cpi, 1, 1);
#endif // CONFIG_VP9_HIGHBITDEPTH
+#if CONFIG_VP9_TEMPORAL_DENOISING
+ setup_denoiser_buffer(cpi);
+#endif
+
if (width) {
cm->width = width;
if (cm->width > cpi->initial_width) {
return;
}
-int vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b) {
+int64_t vp9_get_y_sse(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
- return (int)get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
- a->y_crop_width, a->y_crop_height);
+ return get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
+ a->y_crop_width, a->y_crop_height);
}
#if CONFIG_VP9_HIGHBITDEPTH
-int vp9_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b,
- vpx_bit_depth_t bit_depth) {
- unsigned int sse;
- int sum;
+int64_t vp9_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b) {
assert(a->y_crop_width == b->y_crop_width);
assert(a->y_crop_height == b->y_crop_height);
assert((a->flags & YV12_FLAG_HIGHBITDEPTH) != 0);
assert((b->flags & YV12_FLAG_HIGHBITDEPTH) != 0);
- switch (bit_depth) {
- case VPX_BITS_8:
- highbd_variance(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
- a->y_crop_width, a->y_crop_height, &sse, &sum);
- return (int) sse;
- case VPX_BITS_10:
- highbd_10_variance(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
- a->y_crop_width, a->y_crop_height, &sse, &sum);
- return (int) sse;
- case VPX_BITS_12:
- highbd_12_variance(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
- a->y_crop_width, a->y_crop_height, &sse, &sum);
- return (int) sse;
- default:
- assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
- return -1;
- }
+
+ return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
+ a->y_crop_width, a->y_crop_height);
}
#endif // CONFIG_VP9_HIGHBITDEPTH
#include "vp9/common/vp9_ppflags.h"
#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_loopfilter_thread.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_thread.h"
#include "vp9/encoder/vp9_svc_layercontext.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_variance.h"
+
#if CONFIG_VP9_TEMPORAL_DENOISING
#include "vp9/encoder/vp9_denoiser.h"
#endif
#if CONFIG_VP9_HIGHBITDEPTH
int use_highbitdepth;
#endif
+ vpx_color_space_t color_space;
} VP9EncoderConfig;
static INLINE int is_lossless_requested(const VP9EncoderConfig *cfg) {
PC_TREE *pc_root;
} ThreadData;
+struct EncWorkerData;
+
typedef struct VP9_COMP {
QUANTS quants;
ThreadData td;
unsigned int tok_count[4][1 << 6];
  // Ambient reconstruction err target for forced key frames
- int ambient_err;
+ int64_t ambient_err;
RD_OPT rd;
// Multi-threading
int num_workers;
VP9Worker *workers;
+ struct EncWorkerData *tile_thr_data;
+ VP9LfSync lf_row_sync;
} VP9_COMP;
-void vp9_initialize_enc();
+void vp9_initialize_enc(void);
-struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf);
+struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
+ BufferPool *const pool);
void vp9_remove_compressor(VP9_COMP *cpi);
void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf);
static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
VP9_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
- VP9_COMMON * const cm = &cpi->common;
- return &cm->frame_bufs[cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]]
+ VP9_COMMON *const cm = &cpi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ return &pool->frame_bufs[cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)]]
.buf;
}
return get_token_alloc(tile_mb_rows, tile_mb_cols);
}
-int vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
+int64_t vp9_get_y_sse(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b);
#if CONFIG_VP9_HIGHBITDEPTH
-int vp9_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
- const YV12_BUFFER_CONFIG *b,
- vpx_bit_depth_t bit_depth);
+int64_t vp9_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+ const YV12_BUFFER_CONFIG *b);
#endif // CONFIG_VP9_HIGHBITDEPTH
void vp9_alloc_compressor_data(VP9_COMP *cpi);
CHECK_MEM_ERROR(cm, cpi->workers,
vpx_malloc(num_workers * sizeof(*cpi->workers)));
+ CHECK_MEM_ERROR(cm, cpi->tile_thr_data,
+ vpx_calloc(num_workers, sizeof(*cpi->tile_thr_data)));
+
for (i = 0; i < num_workers; i++) {
VP9Worker *const worker = &cpi->workers[i];
- EncWorkerData *thread_data;
+ EncWorkerData *thread_data = &cpi->tile_thr_data[i];
++cpi->num_workers;
-
winterface->init(worker);
- CHECK_MEM_ERROR(cm, worker->data1,
- (EncWorkerData*)vpx_calloc(1, sizeof(EncWorkerData)));
- thread_data = (EncWorkerData*)worker->data1;
if (i < num_workers - 1) {
thread_data->cpi = cpi;
// Allocate thread data.
CHECK_MEM_ERROR(cm, thread_data->td,
- vpx_calloc(1, sizeof(*thread_data->td)));
+ vpx_memalign(32, sizeof(*thread_data->td)));
+ vp9_zero(*thread_data->td);
+
// Set up pc_tree.
thread_data->td->leaf_tree = NULL;
thread_data->td->pc_tree = NULL;
thread_data->td = &cpi->td;
}
- // data2 is unused.
- worker->data2 = NULL;
-
winterface->sync(worker);
- worker->hook = (VP9WorkerHook)enc_worker_hook;
}
}
for (i = 0; i < num_workers; i++) {
VP9Worker *const worker = &cpi->workers[i];
- EncWorkerData *const thread_data = (EncWorkerData*)worker->data1;
+ EncWorkerData *thread_data;
+
+ worker->hook = (VP9WorkerHook)enc_worker_hook;
+ worker->data1 = &cpi->tile_thr_data[i];
+ worker->data2 = NULL;
+ thread_data = (EncWorkerData*)worker->data1;
// Before encoding a frame, copy the thread data from cpi.
thread_data->td->mb = cpi->td.mb;
#define OUTPUT_FPF 0
#define ARF_STATS_OUTPUT 0
+#define GROUP_ADAPTIVE_MAXQ 0
+
#define BOOST_BREAKOUT 12.5
#define BOOST_FACTOR 12.5
#define ERR_DIVISOR 128.0
#define NEW_MV_MODE_PENALTY 32
#define SVC_FACTOR_PT_LOW 0.45
#define DARK_THRESH 64
+#define DEFAULT_GRP_WEIGHT 1.0
#define DOUBLE_DIVIDE_CHECK(x) ((x) < 0 ? (x) - 0.000001 : (x) + 0.000001)
}
if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
+ BufferPool *const pool = cm->buffer_pool;
const int ref_idx =
cm->ref_frame_map[get_ref_frame_idx(cpi, GOLDEN_FRAME)];
const int scaled_idx = cpi->scaled_ref_idx[GOLDEN_FRAME - 1];
- gld_yv12 = (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf :
+ gld_yv12 = (scaled_idx != ref_idx) ? &pool->frame_bufs[scaled_idx].buf :
get_ref_frame_buffer(cpi, GOLDEN_FRAME);
} else {
gld_yv12 = NULL;
#define EDIV_SIZE_FACTOR 800
static int get_twopass_worst_quality(const VP9_COMP *cpi,
- const FIRSTPASS_STATS *stats,
- int section_target_bandwidth) {
+ const double section_err,
+ int section_target_bandwidth,
+ double group_weight_factor) {
const RATE_CONTROL *const rc = &cpi->rc;
const VP9EncoderConfig *const oxcf = &cpi->oxcf;
} else {
const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE)
? cpi->initial_mbs : cpi->common.MBs;
- const double section_err = stats->coded_error / stats->count;
const double err_per_mb = section_err / num_mbs;
const double speed_term = 1.0 + 0.04 * oxcf->speed;
const double ediv_size_correction = num_mbs / EDIV_SIZE_FACTOR;
int q;
int is_svc_upper_layer = 0;
+
if (is_two_pass_svc(cpi) && cpi->svc.spatial_layer_id > 0)
is_svc_upper_layer = 1;
+
// Try and pick a max Q that will be high enough to encode the
// content at the given rate.
for (q = rc->best_quality; q < rc->worst_quality; ++q) {
is_svc_upper_layer ? SVC_FACTOR_PT_LOW :
FACTOR_PT_LOW, FACTOR_PT_HIGH, q,
cpi->common.bit_depth);
- const int bits_per_mb = vp9_rc_bits_per_mb(INTER_FRAME, q,
- factor * speed_term,
- cpi->common.bit_depth);
+ const int bits_per_mb =
+ vp9_rc_bits_per_mb(INTER_FRAME, q,
+ factor * speed_term * group_weight_factor,
+ cpi->common.bit_depth);
if (bits_per_mb <= target_norm_bits_per_mb)
break;
}
double boost_score = 0.0;
double old_boost_score = 0.0;
double gf_group_err = 0.0;
+#if GROUP_ADAPTIVE_MAXQ
+ double gf_group_raw_error = 0.0;
+#endif
double gf_first_frame_err = 0.0;
double mod_frame_err = 0.0;
// If this is a key frame or the overlay from a previous arf then
// the error score / cost of this frame has already been accounted for.
- if (cpi->common.frame_type == KEY_FRAME || rc->source_alt_ref_active)
+ if (cpi->common.frame_type == KEY_FRAME || rc->source_alt_ref_active) {
gf_group_err -= gf_first_frame_err;
+#if GROUP_ADAPTIVE_MAXQ
+ gf_group_raw_error -= this_frame->coded_error;
+#endif
+ }
// Motion breakout threshold for loop below depends on image size.
mv_ratio_accumulator_thresh =
// Accumulate error score of frames in this gf group.
mod_frame_err = calculate_modified_err(twopass, oxcf, this_frame);
gf_group_err += mod_frame_err;
+#if GROUP_ADAPTIVE_MAXQ
+ gf_group_raw_error += this_frame->coded_error;
+#endif
if (EOF == input_stats(twopass, &next_frame))
break;
twopass->gf_zeromotion_pct = (int)(zero_motion_accumulator * 1000.0);
+ // Was the group length constrained by the requirement for a new KF?
+ rc->constrained_gf_group = (i >= rc->frames_to_key) ? 1 : 0;
+
// Set the interval until the next gf.
if (cpi->common.frame_type == KEY_FRAME || rc->source_alt_ref_active)
rc->baseline_gf_interval = i - 1;
if (EOF == input_stats(twopass, this_frame))
break;
gf_group_err += calculate_modified_err(twopass, oxcf, this_frame);
+#if GROUP_ADAPTIVE_MAXQ
+ gf_group_raw_error += this_frame->coded_error;
+#endif
}
rc->baseline_gf_interval = new_gf_interval;
}
// Calculate the bits to be allocated to the gf/arf group as a whole
gf_group_bits = calculate_total_gf_group_bits(cpi, gf_group_err);
+#if GROUP_ADAPTIVE_MAXQ
+ // Calculate an estimate of the maxq needed for the group.
+  // We are more aggressive about correcting for sections
+ // where there could be significant overshoot than for easier
+ // sections where we do not wish to risk creating an overshoot
+ // of the allocated bit budget.
+ if ((cpi->oxcf.rc_mode != VPX_Q) && (rc->baseline_gf_interval > 1)) {
+ const int vbr_group_bits_per_frame =
+ (int)(gf_group_bits / rc->baseline_gf_interval);
+ const double group_av_err = gf_group_raw_error / rc->baseline_gf_interval;
+ const int tmp_q =
+ get_twopass_worst_quality(cpi, group_av_err, vbr_group_bits_per_frame,
+ twopass->kfgroup_inter_fraction);
+
+ if (tmp_q < twopass->baseline_worst_quality) {
+ twopass->active_worst_quality =
+ (tmp_q + twopass->baseline_worst_quality + 1) / 2;
+ } else {
+ twopass->active_worst_quality = tmp_q;
+ }
+ }
+#endif
+
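The group-adaptive max Q logic above reduces to a simple blend: when the per-group worst-quality estimate comes out lower (better) than the two-pass baseline, the active worst quality only moves halfway toward it; when it comes out higher, it is taken as-is. A minimal sketch of that blending rule, assuming plain int qindex values and ignoring the surrounding rate-control state:

  /* tmp_q: per-group estimate from get_twopass_worst_quality();
   * baseline_q: whole-clip baseline worst quality. */
  static int blend_group_max_q(int tmp_q, int baseline_q) {
    if (tmp_q < baseline_q)
      return (tmp_q + baseline_q + 1) / 2;  /* only move halfway down */
    return tmp_q;                           /* allow raising freely */
  }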
// Calculate the extra bits to be used for boosted frame(s)
gf_arf_bits = calculate_boost_bits(rc->baseline_gf_interval,
rc->gfu_boost, gf_group_bits);
// Reset to the start of the group.
reset_fpf_position(twopass, start_position);
- kf_group_err = 0;
+ kf_group_err = 0.0;
// Rescan to get the correct error data for the forced kf group.
for (i = 0; i < rc->frames_to_key; ++i) {
kf_bits = calculate_boost_bits((rc->frames_to_key - 1),
rc->kf_boost, twopass->kf_group_bits);
+ // Work out the fraction of the kf group bits reserved for the inter frames
+ // within the group after discounting the bits for the kf itself.
+ if (twopass->kf_group_bits) {
+ twopass->kfgroup_inter_fraction =
+ (double)(twopass->kf_group_bits - kf_bits) /
+ (double)twopass->kf_group_bits;
+ } else {
+ twopass->kfgroup_inter_fraction = 1.0;
+ }
+
twopass->kf_group_bits -= kf_bits;
// Save the bits to spend on the key frame.
GF_GROUP *const gf_group = &twopass->gf_group;
int frames_left;
FIRSTPASS_STATS this_frame;
- FIRSTPASS_STATS this_frame_copy;
int target_rate;
LAYER_CONTEXT *const lc = is_two_pass_svc(cpi) ?
// Special case code for first frame.
const int section_target_bandwidth = (int)(twopass->bits_left /
frames_left);
- const int tmp_q = get_twopass_worst_quality(cpi, &twopass->total_left_stats,
- section_target_bandwidth);
+ const double section_error =
+ twopass->total_left_stats.coded_error / twopass->total_left_stats.count;
+ const int tmp_q =
+ get_twopass_worst_quality(cpi, section_error,
+ section_target_bandwidth, DEFAULT_GRP_WEIGHT);
+
twopass->active_worst_quality = tmp_q;
+ twopass->baseline_worst_quality = tmp_q;
rc->ni_av_qi = tmp_q;
rc->last_q[INTER_FRAME] = tmp_q;
rc->avg_q = vp9_convert_qindex_to_q(tmp_q, cm->bit_depth);
if (EOF == input_stats(twopass, &this_frame))
return;
- // Local copy of the current frame's first pass stats.
- this_frame_copy = this_frame;
-
// Keyframe and section processing.
if (rc->frames_to_key == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY)) {
+ FIRSTPASS_STATS this_frame_copy;
+ this_frame_copy = this_frame;
// Define next KF group and assign bits to it.
- find_next_key_frame(cpi, &this_frame_copy);
+ find_next_key_frame(cpi, &this_frame);
+ this_frame = this_frame_copy;
} else {
cm->frame_type = INTER_FRAME;
}
// Define a new GF/ARF group. (Should always enter here for key frames).
if (rc->frames_till_gf_update_due == 0) {
- define_gf_group(cpi, &this_frame_copy);
+ define_gf_group(cpi, &this_frame);
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
if (lc != NULL)
// Error score of frames still to be coded in kf group
int64_t kf_group_error_left;
+
+  // The fraction of a kf group's total bits allocated to the inter frames
+ double kfgroup_inter_fraction;
+
int sr_update_lag;
int kf_zeromotion_pct;
int last_kfgroup_zeromotion_pct;
int gf_zeromotion_pct;
+ int baseline_worst_quality;
int active_worst_quality;
int extend_minq;
int extend_maxq;
#define USE_PARTIAL_COPY 0
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end, unsigned int flags) {
+ int64_t ts_start, int64_t ts_end,
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_highbitdepth,
+#endif
+ unsigned int flags) {
struct lookahead_entry *buf;
#if USE_PARTIAL_COPY
int row, col, active_end;
int mb_rows = (src->y_height + 15) >> 4;
int mb_cols = (src->y_width + 15) >> 4;
#endif
+ int width = src->y_crop_width;
+ int height = src->y_crop_height;
+ int uv_width = src->uv_crop_width;
+ int uv_height = src->uv_crop_height;
+ int subsampling_x = src->subsampling_x;
+ int subsampling_y = src->subsampling_y;
+ int larger_dimensions, new_dimensions;
if (ctx->sz + 1 + MAX_PRE_FRAMES > ctx->max_sz)
return 1;
ctx->sz++;
buf = pop(ctx, &ctx->write_idx);
+ new_dimensions = width != buf->img.y_crop_width ||
+ height != buf->img.y_crop_height ||
+ uv_width != buf->img.uv_crop_width ||
+ uv_height != buf->img.uv_crop_height;
+ larger_dimensions = width > buf->img.y_width ||
+ height > buf->img.y_height ||
+ uv_width > buf->img.uv_width ||
+ uv_height > buf->img.uv_height;
+ assert(!larger_dimensions || new_dimensions);
+
#if USE_PARTIAL_COPY
// TODO(jkoleszar): This is disabled for now, as
// vp9_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
  // 1. Lookahead queue has a size of 1.
// 2. Active map is provided.
// 3. This is not a key frame, golden nor altref frame.
- if (ctx->max_sz == 1 && active_map && !flags) {
+ if (!new_dimensions && ctx->max_sz == 1 && active_map && !flags) {
for (row = 0; row < mb_rows; ++row) {
col = 0;
active_map += mb_cols;
}
} else {
+#endif
+ if (larger_dimensions) {
+ YV12_BUFFER_CONFIG new_img;
+ memset(&new_img, 0, sizeof(new_img));
+ if (vp9_alloc_frame_buffer(&new_img,
+ width, height, subsampling_x, subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ use_highbitdepth,
+#endif
+ VP9_ENC_BORDER_IN_PIXELS,
+ 0))
+ return 1;
+ vp9_free_frame_buffer(&buf->img);
+ buf->img = new_img;
+ } else if (new_dimensions) {
+ buf->img.y_crop_width = src->y_crop_width;
+ buf->img.y_crop_height = src->y_crop_height;
+ buf->img.uv_crop_width = src->uv_crop_width;
+ buf->img.uv_crop_height = src->uv_crop_height;
+ buf->img.subsampling_x = src->subsampling_x;
+ buf->img.subsampling_y = src->subsampling_y;
+ }
+ // Partial copy not implemented yet
vp9_copy_and_extend_frame(src, &buf->img);
+#if USE_PARTIAL_COPY
}
-#else
- // Partial copy not implemented yet
- vp9_copy_and_extend_frame(src, &buf->img);
#endif
buf->ts_start = ts_start;
* \param[in] active_map Map that specifies which macroblock is active
*/
int vp9_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
- int64_t ts_start, int64_t ts_end, unsigned int flags);
+ int64_t ts_start, int64_t ts_end,
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_highbitdepth,
+#endif
+ unsigned int flags);
/**\brief Get the next source buffer to encode
}
-static int try_filter_frame(const YV12_BUFFER_CONFIG *sd, VP9_COMP *const cpi,
- int filt_level, int partial_frame) {
+static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
+ VP9_COMP *const cpi,
+ int filt_level, int partial_frame) {
VP9_COMMON *const cm = &cpi->common;
- int filt_err;
+ int64_t filt_err;
+
+ if (cpi->num_workers > 1)
+ vp9_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+ filt_level, 1, partial_frame,
+ cpi->workers, cpi->num_workers, &cpi->lf_row_sync);
+ else
+ vp9_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+ 1, partial_frame);
- vp9_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level, 1,
- partial_frame);
#if CONFIG_VP9_HIGHBITDEPTH
if (cm->use_highbitdepth) {
- filt_err = vp9_highbd_get_y_sse(sd, cm->frame_to_show, cm->bit_depth);
+ filt_err = vp9_highbd_get_y_sse(sd, cm->frame_to_show);
} else {
filt_err = vp9_get_y_sse(sd, cm->frame_to_show);
}
const int min_filter_level = 0;
const int max_filter_level = get_max_filter_level(cpi);
int filt_direction = 0;
- int best_err, filt_best;
+ int64_t best_err;
+ int filt_best;
// Start the search at the previous frame filter level unless it is now out of
// range.
int filt_mid = clamp(lf->filter_level, min_filter_level, max_filter_level);
int filter_step = filt_mid < 16 ? 4 : filt_mid / 4;
// Sum squared error at each filter level
- int ss_err[MAX_LOOP_FILTER + 1];
+ int64_t ss_err[MAX_LOOP_FILTER + 1];
// Set each entry to -1
vpx_memset(ss_err, 0xFF, sizeof(ss_err));
const int filt_low = MAX(filt_mid - filter_step, min_filter_level);
// Bias against raising loop filter in favor of lowering it.
- int bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
+ int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
if ((cpi->oxcf.pass == 2) && (cpi->twopass.section_intra_rating < 20))
bias = (bias * cpi->twopass.section_intra_rating) / 20;
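The bias that discourages raising the filter level scales with the current best error and the search step: with best_err near 2^20, filt_mid of 16 and filter_step of 4, the shift is 15 - 16/8 = 13, so the bias is (2^20 >> 13) * 4 = 512; a two-pass section_intra_rating of 10 then halves it to 256. A self-contained sketch of the same arithmetic, using the widened int64_t error type introduced above:

  #include <stdint.h>

  /* Bias against raising the loop-filter level; intra-heavy two-pass
   * sections (rating < 20) scale the bias down proportionally. */
  static int64_t raise_filter_bias(int64_t best_err, int filt_mid,
                                   int filter_step, int is_two_pass,
                                   int section_intra_rating) {
    int64_t bias = (best_err >> (15 - (filt_mid / 8))) * filter_step;
    if (is_two_pass && section_intra_rating < 20)
      bias = (bias * section_intra_rating) / 20;
    return bias;
  }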
args->dist += dist;
}
-static const THR_MODES mode_idx[MAX_REF_FRAMES][4] = {
+static const THR_MODES mode_idx[MAX_REF_FRAMES - 1][4] = {
{THR_DC, THR_H_PRED, THR_V_PRED, THR_TM},
{THR_NEARESTMV, THR_NEARMV, THR_ZEROMV, THR_NEWMV},
{THR_NEARESTG, THR_NEARG, THR_ZEROG, THR_NEWG},
- {THR_NEARESTA, THR_NEARA, THR_ZEROA, THR_NEWA},
};
static const PREDICTION_MODE intra_mode_list[] = {
cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth) >> reduction_fac;
const int64_t inter_mode_thresh = RDCOST(x->rdmult, x->rddiv,
intra_cost_penalty, 0);
- const int8_t segment_id = mbmi->segment_id;
- const int *const rd_threshes = cpi->rd.threshes[segment_id][bsize];
+ const int *const rd_threshes = cpi->rd.threshes[mbmi->segment_id][bsize];
const int *const rd_thresh_freq_fact = tile_data->thresh_freq_fact[bsize];
INTERP_FILTER filter_ref;
const int bsl = mi_width_log2_lookup[bsize];
tx_mode_to_biggest_tx_size[cm->tx_mode]);
mbmi->interp_filter = cm->interp_filter == SWITCHABLE ?
EIGHTTAP : cm->interp_filter;
- mbmi->segment_id = segment_id;
+
+#if CONFIG_VP9_TEMPORAL_DENOISING
+ vp9_denoiser_reset_frame_stats(ctx);
+#endif
for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
x->pred_mv_sad[ref_frame] = INT_MAX;
if (cm->use_prev_frame_mvs)
vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
- candidates, mi_row, mi_col);
+ candidates, mi_row, mi_col, NULL, NULL);
else
const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
xd->mi[0].src_mi,
clamp_mv2(&frame_mv[NEARMV][ref_frame].as_mv, xd);
mbmi->ref_frame[0] = ref_frame;
+ set_ref_ptrs(cm, xd, ref_frame, NONE);
for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
int rate_mv = 0;
// Perform intra prediction search, if the best SAD is above a certain
// threshold.
- if (!x->skip && best_rdc.rdcost > inter_mode_thresh &&
- bsize <= cpi->sf.max_intra_bsize) {
+ if (best_rdc.rdcost == INT64_MAX ||
+ (!x->skip && best_rdc.rdcost > inter_mode_thresh &&
+ bsize <= cpi->sf.max_intra_bsize)) {
struct estimate_block_intra_args args = { cpi, x, DC_PRED, 0, 0 };
const TX_SIZE intra_tx_size =
MIN(max_txsize_lookup[bsize],
tx_mode_to_biggest_tx_size[cpi->common.tx_mode]);
int i;
+ TX_SIZE best_intra_tx_size = TX_SIZES;
if (reuse_inter_pred && best_pred != NULL) {
if (best_pred->data == orig_dst.buf) {
if (this_rdc.rdcost < best_rdc.rdcost) {
best_rdc = this_rdc;
mbmi->mode = this_mode;
- mbmi->tx_size = intra_tx_size;
+ best_intra_tx_size = mbmi->tx_size;
mbmi->ref_frame[0] = INTRA_FRAME;
mbmi->uv_mode = this_mode;
mbmi->mv[0].as_int = INVALID_MV;
if (mbmi->ref_frame[0] != INTRA_FRAME) {
x->skip_txfm[0] = best_mode_skip_txfm;
mbmi->tx_size = best_tx_size;
+ } else {
+ mbmi->tx_size = best_intra_tx_size;
}
}
}
}
- if (is_inter_block(mbmi))
- vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- cpi->sf.adaptive_rd_thresh, bsize,
- mode_idx[best_ref_frame][INTER_OFFSET(mbmi->mode)]);
- else
- vp9_update_rd_thresh_fact(tile_data->thresh_freq_fact,
- cpi->sf.adaptive_rd_thresh, bsize,
- mode_idx[INTRA_FRAME][mbmi->mode]);
+ if (cpi->sf.adaptive_rd_thresh) {
+ THR_MODES best_mode_idx = is_inter_block(mbmi) ?
+ mode_idx[best_ref_frame][INTER_OFFSET(mbmi->mode)] :
+ mode_idx[INTRA_FRAME][mbmi->mode];
+ PREDICTION_MODE this_mode;
+ for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
+ for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+ THR_MODES thr_mode_idx = mode_idx[ref_frame][INTER_OFFSET(this_mode)];
+ int *freq_fact = &tile_data->thresh_freq_fact[bsize][thr_mode_idx];
+ if (thr_mode_idx == best_mode_idx)
+ *freq_fact -= (*freq_fact >> 4);
+ else
+ *freq_fact = MIN(*freq_fact + RD_THRESH_INC,
+ cpi->sf.adaptive_rd_thresh * RD_THRESH_MAX_FACT);
+ }
+ }
+ }
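The loop above replaces the single-mode threshold update: the winning mode's frequency factor decays geometrically (losing 1/16 of its value each time it wins, so its RD threshold keeps dropping), while every other factor is nudged up and clamped at adaptive_rd_thresh times a maximum factor. A standalone sketch of the same update rule; the increment and cap stand in for RD_THRESH_INC and RD_THRESH_MAX_FACT from vp9_rd.h, and the values used here are placeholders:

  /* freq_fact: per-mode threshold factors for one block size. */
  #define THRESH_INC_SKETCH 1
  #define THRESH_MAX_FACT_SKETCH 32

  static void update_thresh_freq_fact_sketch(int *freq_fact, int num_modes,
                                             int best_mode,
                                             int adaptive_rd_thresh) {
    const int cap = adaptive_rd_thresh * THRESH_MAX_FACT_SKETCH;
    int m;
    for (m = 0; m < num_modes; ++m) {
      if (m == best_mode) {
        freq_fact[m] -= freq_fact[m] >> 4;   /* decay toward zero */
      } else {
        freq_fact[m] += THRESH_INC_SKETCH;   /* penalize unused modes */
        if (freq_fact[m] > cap) freq_fact[m] = cap;
      }
    }
  }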
*rd_cost = best_rdc;
}
+
+void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
+ TileDataEnc *tile_data,
+ int mi_row, int mi_col, RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
+ VP9_COMMON *const cm = &cpi->common;
+ TileInfo *const tile_info = &tile_data->tile_info;
+ SPEED_FEATURES *const sf = &cpi->sf;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const struct segmentation *const seg = &cm->seg;
+ MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
+ MV_REFERENCE_FRAME best_ref_frame = NONE;
+ unsigned char segment_id = mbmi->segment_id;
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE];
+ static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
+ int64_t best_rd = INT64_MAX;
+ b_mode_info bsi[MAX_REF_FRAMES][4];
+ int ref_frame_skip_mask = 0;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy;
+
+ x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
+ ctx->pred_pixel_ready = 0;
+
+ for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
+ int_mv dummy_mv[2];
+ x->pred_mv_sad[ref_frame] = INT_MAX;
+
+ if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+ const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
+ int_mv *const candidates = mbmi->ref_mvs[ref_frame];
+ const struct scale_factors *const sf =
+ &cm->frame_refs[ref_frame - 1].sf;
+ vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
+ sf, sf);
+ vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
+ candidates, mi_row, mi_col, NULL, NULL);
+
+ vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
+ &dummy_mv[0], &dummy_mv[1]);
+ } else {
+ ref_frame_skip_mask |= (1 << ref_frame);
+ }
+ }
+
+ mbmi->sb_type = bsize;
+ mbmi->tx_size = TX_4X4;
+ mbmi->uv_mode = DC_PRED;
+ mbmi->ref_frame[0] = LAST_FRAME;
+ mbmi->ref_frame[1] = NONE;
+ mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
+ : cm->interp_filter;
+
+ for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
+ int64_t this_rd = 0;
+ int plane;
+
+ if (ref_frame_skip_mask & (1 << ref_frame))
+ continue;
+
+ // TODO(jingning, agrange): Scaling reference frame not supported for
+ // sub8x8 blocks. Is this supported now?
+ if (ref_frame > INTRA_FRAME &&
+ vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+ continue;
+
+    // If the segment reference frame feature is enabled, then do nothing
+    // if the current ref frame is not allowed.
+ if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
+ vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
+ continue;
+
+ mbmi->ref_frame[0] = ref_frame;
+ x->skip = 0;
+ set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
+
+ // Select prediction reference frames.
+ for (plane = 0; plane < MAX_MB_PLANE; plane++)
+ xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];
+
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ int_mv b_mv[MB_MODE_COUNT];
+ int64_t b_best_rd = INT64_MAX;
+ const int i = idy * 2 + idx;
+ PREDICTION_MODE this_mode;
+ int b_rate = 0;
+ int64_t b_dist = 0;
+ RD_COST this_rdc;
+ unsigned int var_y, sse_y;
+
+ struct macroblock_plane *p = &x->plane[0];
+ struct macroblockd_plane *pd = &xd->plane[0];
+
+ const struct buf_2d orig_src = p->src;
+ const struct buf_2d orig_dst = pd->dst;
+ struct buf_2d orig_pre[2];
+ vpx_memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
+
+ // set buffer pointers for sub8x8 motion search.
+ p->src.buf =
+ &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ pd->dst.buf =
+ &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+ pd->pre[0].buf =
+ &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8,
+ i, pd->pre[0].stride)];
+
+ b_mv[ZEROMV].as_int = 0;
+ b_mv[NEWMV].as_int = INVALID_MV;
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, tile_info, i, 0, mi_row, mi_col,
+ &b_mv[NEARESTMV],
+ &b_mv[NEARMV]);
+
+ for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+ xd->mi[0].bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
+
+ if (this_mode == NEWMV) {
+ const int step_param = cpi->sf.mv.fullpel_search_step_param;
+ MV mvp_full;
+ MV tmp_mv;
+ int cost_list[5];
+ const int tmp_col_min = x->mv_col_min;
+ const int tmp_col_max = x->mv_col_max;
+ const int tmp_row_min = x->mv_row_min;
+ const int tmp_row_max = x->mv_row_max;
+ int dummy_dist;
+
+ if (i == 0) {
+ mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
+ mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
+ } else {
+ mvp_full.row = xd->mi[0].bmi[0].as_mv[0].as_mv.row >> 3;
+ mvp_full.col = xd->mi[0].bmi[0].as_mv[0].as_mv.col >> 3;
+ }
+
+ vp9_set_mv_search_range(x, &mbmi->ref_mvs[0]->as_mv);
+
+ vp9_full_pixel_search(
+ cpi, x, bsize, &mvp_full, step_param, x->sadperbit4,
+ cond_cost_list(cpi, cost_list),
+ &mbmi->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
+ INT_MAX, 0);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+            // Calculate the bit cost of the motion vector.
+ mvp_full.row = tmp_mv.row * 8;
+ mvp_full.col = tmp_mv.col * 8;
+
+ b_rate += vp9_mv_bit_cost(&mvp_full,
+ &mbmi->ref_mvs[ref_frame][0].as_mv,
+ x->nmvjointcost, x->mvcost,
+ MV_COST_WEIGHT);
+
+ b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+ [INTER_OFFSET(NEWMV)];
+ if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd)
+ continue;
+
+ cpi->find_fractional_mv_step(x, &tmp_mv,
+ &mbmi->ref_mvs[ref_frame][0].as_mv,
+ cpi->common.allow_high_precision_mv,
+ x->errorperbit,
+ &cpi->fn_ptr[bsize],
+ cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step,
+ cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost,
+ &dummy_dist,
+ &x->pred_sse[ref_frame], NULL, 0, 0);
+
+ xd->mi[0].bmi[i].as_mv[0].as_mv = tmp_mv;
+ }
+
+ vp9_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
+ pd->dst.buf, pd->dst.stride,
+ &xd->mi[0].bmi[i].as_mv[0].as_mv,
+ &xd->block_refs[0]->sf,
+ 4 * num_4x4_blocks_wide,
+ 4 * num_4x4_blocks_high, 0,
+ vp9_get_interp_kernel(mbmi->interp_filter),
+ MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i & 0x01),
+ mi_row * MI_SIZE + 4 * (i >> 1));
+ model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
+ &var_y, &sse_y);
+
+ this_rdc.rate += b_rate;
+ this_rdc.dist += b_dist;
+ this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
+ this_rdc.rate, this_rdc.dist);
+ if (this_rdc.rdcost < b_best_rd) {
+ b_best_rd = this_rdc.rdcost;
+ bsi[ref_frame][i].as_mode = this_mode;
+ bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0].bmi[i].as_mv[0].as_mv;
+ }
+ } // mode search
+
+ // restore source and prediction buffer pointers.
+ p->src = orig_src;
+ pd->pre[0] = orig_pre[0];
+ pd->dst = orig_dst;
+ this_rd += b_best_rd;
+
+ xd->mi[0].bmi[i] = bsi[ref_frame][i];
+ if (num_4x4_blocks_wide > 1)
+ xd->mi[0].bmi[i + 1] = xd->mi[0].bmi[i];
+ if (num_4x4_blocks_high > 1)
+ xd->mi[0].bmi[i + 2] = xd->mi[0].bmi[i];
+ }
+ } // loop through sub8x8 blocks
+
+ if (this_rd < best_rd) {
+ best_rd = this_rd;
+ best_ref_frame = ref_frame;
+ }
+ } // reference frames
+
+ mbmi->tx_size = TX_4X4;
+ mbmi->ref_frame[0] = best_ref_frame;
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const int block = idy * 2 + idx;
+ xd->mi[0].bmi[block] = bsi[best_ref_frame][block];
+ if (num_4x4_blocks_wide > 1)
+ xd->mi[0].bmi[block + 1] = bsi[best_ref_frame][block];
+ if (num_4x4_blocks_high > 1)
+ xd->mi[0].bmi[block + 2] = bsi[best_ref_frame][block];
+ }
+ }
+ mbmi->mode = xd->mi[0].bmi[3].as_mode;
+ ctx->mic = *(xd->mi[0].src_mi);
+ ctx->skip_txfm[0] = 0;
+ ctx->skip = 0;
+ // Dummy assignment for speed -5. No effect in speed -6.
+ rd_cost->rdcost = best_rd;
+}
BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx);
+void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
+ TileDataEnc *tile_data,
+ int mi_row, int mi_col, RD_COST *rd_cost,
+ BLOCK_SIZE bsize,
+ PICK_MODE_CONTEXT *ctx);
+
#ifdef __cplusplus
} // extern "C"
#endif
const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
  // TODO(jingning) Decide whether these arguments are needed after the
  // quantization process is completed.
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value,
uint16_t *eob_ptr,
const int16_t *scan,
const int16_t *iscan) {
// quantization process is completed.
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)iscan;
vpx_memset(qcoeff_ptr, 0, count * sizeof(*qcoeff_ptr));
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int i, eob = -1;
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
int i, non_zero_count = (int)n_coeffs, eob = -1;
- const int zbins[2] = { zbin_ptr[0] + zbin_oq_value,
- zbin_ptr[1] + zbin_oq_value };
- const int nzbins[2] = { zbins[0] * -1,
- zbins[1] * -1 };
+ const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]};
+ const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
- const int16_t *dequant_ptr, int zbin_oq_value,
+ const int16_t *dequant_ptr,
uint16_t *eob_ptr, const int16_t *scan,
const int16_t *iscan) {
int i, non_zero_count = (int)n_coeffs, eob = -1;
- const int zbins[2] = { zbin_ptr[0] + zbin_oq_value,
- zbin_ptr[1] + zbin_oq_value };
- const int nzbins[2] = { zbins[0] * -1,
- zbins[1] * -1 };
+ const int zbins[2] = {zbin_ptr[0], zbin_ptr[1]};
+ const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
(void)iscan;
vpx_memset(qcoeff_ptr, 0, n_coeffs * sizeof(*qcoeff_ptr));
const int16_t *quant_shift_ptr,
tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
- const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1),
- ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1) };
+ const int zbins[2] = {ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
+ ROUND_POWER_OF_TWO(zbin_ptr[1], 1)};
const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
int idx = 0;
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value, uint16_t *eob_ptr,
+ uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
- const int zbins[2] = { ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1),
- ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1) };
- const int nzbins[2] = { zbins[0] * -1, zbins[1] * -1 };
+ const int zbins[2] = {ROUND_POWER_OF_TWO(zbin_ptr[0], 1),
+ ROUND_POWER_OF_TWO(zbin_ptr[1], 1)};
+ const int nzbins[2] = {zbins[0] * -1, zbins[1] * -1};
int idx = 0;
int idx_arr[1024];
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, p->zbin_extra, &p->eobs[block],
+ pd->dequant, &p->eobs[block],
scan, iscan);
return;
}
p->zbin, p->round, p->quant, p->quant_shift,
BLOCK_OFFSET(p->qcoeff, block),
BLOCK_OFFSET(pd->dqcoeff, block),
- pd->dequant, p->zbin_extra, &p->eobs[block], scan, iscan);
+ pd->dequant, &p->eobs[block], scan, iscan);
}
static void invert_quant(int16_t *quant, int16_t *shift, int d) {
const int segment_id = xd->mi[0].src_mi->mbmi.segment_id;
const int qindex = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
const int rdmult = vp9_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
- // TODO(paulwilkins): 0 value for zbin for now pending follow on patch.
- const int zbin = 0;
int i;
// Y
x->plane[0].quant_shift = quants->y_quant_shift[qindex];
x->plane[0].zbin = quants->y_zbin[qindex];
x->plane[0].round = quants->y_round[qindex];
- x->plane[0].zbin_extra = (int16_t)((cm->y_dequant[qindex][1] * zbin) >> 7);
xd->plane[0].dequant = cm->y_dequant[qindex];
- x->plane[0].quant_thred[0] = (x->plane[0].zbin[0] + x->plane[0].zbin_extra) *
- (x->plane[0].zbin[0] + x->plane[0].zbin_extra);
- x->plane[0].quant_thred[1] = (x->plane[0].zbin[1] + x->plane[0].zbin_extra) *
- (x->plane[0].zbin[1] + x->plane[0].zbin_extra);
+ x->plane[0].quant_thred[0] = x->plane[0].zbin[0] * x->plane[0].zbin[0];
+ x->plane[0].quant_thred[1] = x->plane[0].zbin[1] * x->plane[0].zbin[1];
// UV
for (i = 1; i < 3; i++) {
x->plane[i].quant_shift = quants->uv_quant_shift[qindex];
x->plane[i].zbin = quants->uv_zbin[qindex];
x->plane[i].round = quants->uv_round[qindex];
- x->plane[i].zbin_extra = (int16_t)((cm->uv_dequant[qindex][1] * zbin) >> 7);
xd->plane[i].dequant = cm->uv_dequant[qindex];
- x->plane[i].quant_thred[0] =
- (x->plane[i].zbin[0] + x->plane[i].zbin_extra) *
- (x->plane[i].zbin[0] + x->plane[i].zbin_extra);
- x->plane[i].quant_thred[1] =
- (x->plane[i].zbin[1] + x->plane[i].zbin_extra) *
- (x->plane[i].zbin[1] + x->plane[i].zbin_extra);
+ x->plane[i].quant_thred[0] = x->plane[i].zbin[0] * x->plane[i].zbin[0];
+ x->plane[i].quant_thred[1] = x->plane[i].zbin[1] * x->plane[i].zbin[1];
}
x->skip_block = vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP);
vp9_initialize_me_consts(cpi, x->q_index);
}
-void vp9_update_zbin_extra(MACROBLOCK *x) {
- const int y_zbin_extra = 0;
- const int uv_zbin_extra = 0;
-
- x->plane[0].zbin_extra = (int16_t)y_zbin_extra;
- x->plane[1].zbin_extra = (int16_t)uv_zbin_extra;
- x->plane[2].zbin_extra = (int16_t)uv_zbin_extra;
-}
-
void vp9_frame_init_quantizer(VP9_COMP *cpi) {
vp9_init_plane_quantizers(cpi, &cpi->td.mb);
}
void vp9_frame_init_quantizer(struct VP9_COMP *cpi);
-void vp9_update_zbin_extra(MACROBLOCK *x);
-
void vp9_init_plane_quantizers(struct VP9_COMP *cpi, MACROBLOCK *x);
void vp9_init_quantizer(struct VP9_COMP *cpi);
}
}
-void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi, int damp_var) {
+void vp9_rc_update_rate_correction_factors(VP9_COMP *cpi) {
const VP9_COMMON *const cm = &cpi->common;
int correction_factor = 100;
double rate_correction_factor = get_rate_correction_factor(cpi);
// More heavily damped adjustment used if we have been oscillating either side
// of target.
- switch (damp_var) {
- case 0:
- adjustment_limit = 0.75;
- break;
- case 1:
- adjustment_limit = 0.25 +
- 0.5 * MIN(1, fabs(log10(0.01 * correction_factor)));
- break;
- case 2:
- default:
- adjustment_limit = 0.25;
- break;
- }
+ adjustment_limit = 0.25 +
+ 0.5 * MIN(1, fabs(log10(0.01 * correction_factor)));
cpi->rc.q_2_frame = cpi->rc.q_1_frame;
cpi->rc.q_1_frame = cm->base_qindex;
rc->projected_frame_size = (int)(bytes_used << 3);
// Post encode loop adjustment of Q prediction.
- vp9_rc_update_rate_correction_factors(
- cpi, (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) ? 2 :
- ((oxcf->rc_mode == VPX_CBR) ? 1 : 0));
+ vp9_rc_update_rate_correction_factors(cpi);
// Keep a record of last Q and ambient average Q.
if (cm->frame_type == KEY_FRAME) {
// better than that already stored.
// This is used to help set quality in forced key frames to reduce popping
if ((qindex < rc->last_boosted_qindex) ||
- (((cm->frame_type == KEY_FRAME) || cpi->refresh_alt_ref_frame ||
+ (cm->frame_type == KEY_FRAME) ||
+ (!rc->constrained_gf_group &&
+ (cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !rc->is_src_frame_alt_ref)))) {
rc->last_boosted_qindex = qindex;
}
rc->baseline_gf_interval = DEFAULT_GF_INTERVAL;
rc->frames_till_gf_update_due = rc->baseline_gf_interval;
// NOTE: frames_till_gf_update_due must be <= frames_to_key.
- if (rc->frames_till_gf_update_due > rc->frames_to_key)
+ if (rc->frames_till_gf_update_due > rc->frames_to_key) {
rc->frames_till_gf_update_due = rc->frames_to_key;
+ rc->constrained_gf_group = 1;
+ } else {
+ rc->constrained_gf_group = 0;
+ }
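+  // A GF group that is shortened to end at the next key frame is flagged as
+  // constrained; constrained groups skip the last_boosted_qindex update for
+  // ARF/GF frames (see the postencode change earlier in this patch).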
cpi->refresh_golden_frame = 1;
rc->source_alt_ref_pending = USE_ALTREF_FOR_ONE_PASS;
rc->gfu_boost = DEFAULT_GF_BOOST;
int max_gf_interval;
int static_scene_max_gf_interval;
int baseline_gf_interval;
+ int constrained_gf_group;
int frames_to_key;
int frames_since_key;
int this_key_frame_forced;
// Updates rate correction factors
// Changes only the rate correction factors in the rate control structure.
-void vp9_rc_update_rate_correction_factors(struct VP9_COMP *cpi, int damp_var);
+void vp9_rc_update_rate_correction_factors(struct VP9_COMP *cpi);
// Decide if we should drop this frame: For 1-pass CBR.
// Changes only the decimation count in the rate control structure
}
}
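+// Converts the raster-scan index of a 4x4 sub-block within a block of size
+// plane_bsize into a pixel offset for a buffer with the given stride.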
+int vp9_raster_block_offset(BLOCK_SIZE plane_bsize,
+ int raster_block, int stride) {
+ const int bw = b_width_log2_lookup[plane_bsize];
+ const int y = 4 * (raster_block >> bw);
+ const int x = 4 * (raster_block & ((1 << bw) - 1));
+ return y * stride + x;
+}
+
+int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base) {
+ const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+ return base + vp9_raster_block_offset(plane_bsize, raster_block, stride);
+}
+
const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi,
int ref_frame) {
const VP9_COMMON *const cm = &cpi->common;
const int ref_idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
- return (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf : NULL;
+ return (scaled_idx != ref_idx) ?
+ &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
}
int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) {
rd->thresh_mult[THR_NEWA] += 1000;
rd->thresh_mult[THR_NEWG] += 1000;
- // Adjust threshold only in real time mode, which only uses last
- // reference frame.
- rd->thresh_mult[THR_NEWMV] += sf->elevate_newmv_thresh;
-
rd->thresh_mult[THR_NEARMV] += 1000;
rd->thresh_mult[THR_NEARA] += 1000;
rd->thresh_mult[THR_COMP_NEARESTLA] += 1000;
int vp9_get_switchable_rate(const struct VP9_COMP *cpi,
const MACROBLOCKD *const xd);
+int vp9_raster_block_offset(BLOCK_SIZE plane_bsize,
+ int raster_block, int stride);
+
+int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base);
+
const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const struct VP9_COMP *cpi,
int ref_frame);
{{INTRA_FRAME, NONE}},
};
-static int raster_block_offset(BLOCK_SIZE plane_bsize,
- int raster_block, int stride) {
- const int bw = b_width_log2_lookup[plane_bsize];
- const int y = 4 * (raster_block >> bw);
- const int x = 4 * (raster_block & ((1 << bw) - 1));
- return y * stride + x;
-}
-static int16_t* raster_block_offset_int16(BLOCK_SIZE plane_bsize,
- int raster_block, int16_t *base) {
- const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
- return base + raster_block_offset(plane_bsize, raster_block, stride);
-}
-
static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx,
int m, int n, int min_plane, int max_plane) {
int i;
uint8_t token_cache[32 * 32];
int pt = combine_entropy_contexts(*A, *L);
int c, cost;
+#if CONFIG_VP9_HIGHBITDEPTH
+ const int16_t *cat6_high_cost = vp9_get_high_cost_table(xd->bd);
+#else
+ const int16_t *cat6_high_cost = vp9_get_high_cost_table(8);
+#endif
+
// Check for consistency of tx_size with mode info
assert(type == PLANE_TYPE_Y ? mbmi->tx_size == tx_size
: get_uv_tx_size(mbmi, pd) == tx_size);
// dc token
int v = qcoeff[0];
- int prev_t = vp9_dct_value_tokens_ptr[v].token;
- cost = (*token_costs)[0][pt][prev_t] + vp9_dct_value_cost_ptr[v];
+ int16_t prev_t;
+ EXTRABIT e;
+ vp9_get_token_extra(v, &prev_t, &e);
+ cost = (*token_costs)[0][pt][prev_t] +
+ vp9_get_cost(prev_t, e, cat6_high_cost);
+
token_cache[0] = vp9_pt_energy_class[prev_t];
++token_costs;
// ac tokens
for (c = 1; c < eob; c++) {
const int rc = scan[c];
- int t;
+ int16_t t;
v = qcoeff[rc];
- t = vp9_dct_value_tokens_ptr[v].token;
+ vp9_get_token_extra(v, &t, &e);
if (use_fast_coef_costing) {
- cost += (*token_costs)[!prev_t][!prev_t][t] + vp9_dct_value_cost_ptr[v];
+ cost += (*token_costs)[!prev_t][!prev_t][t] +
+ vp9_get_cost(t, e, cat6_high_cost);
} else {
pt = get_coef_context(nb, token_cache, c);
- cost += (*token_costs)[!prev_t][pt][t] + vp9_dct_value_cost_ptr[v];
+ cost += (*token_costs)[!prev_t][pt][t] +
+ vp9_get_cost(t, e, cat6_high_cost);
token_cache[rc] = vp9_pt_energy_class[t];
}
prev_t = t;
struct macroblockd_plane *pd = &xd->plane[0];
const int src_stride = p->src.stride;
const int dst_stride = pd->dst.stride;
- const uint8_t *src_init = &p->src.buf[raster_block_offset(BLOCK_8X8, ib,
- src_stride)];
- uint8_t *dst_init = &pd->dst.buf[raster_block_offset(BLOCK_8X8, ib,
- dst_stride)];
+ const uint8_t *src_init = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, ib,
+ src_stride)];
+ uint8_t *dst_init = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, ib,
+ dst_stride)];
ENTROPY_CONTEXT ta[2], tempa[2];
ENTROPY_CONTEXT tl[2], templ[2];
const int block = ib + idy * 2 + idx;
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
- int16_t *const src_diff = raster_block_offset_int16(BLOCK_8X8, block,
- p->src_diff);
+ int16_t *const src_diff = vp9_raster_block_offset_int16(BLOCK_8X8,
+ block,
+ p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0].src_mi->bmi[block].as_mode = mode;
vp9_predict_intra_block(xd, block, 1,
const int block = ib + idy * 2 + idx;
const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
- int16_t *const src_diff = raster_block_offset_int16(BLOCK_8X8, block,
- p->src_diff);
+ int16_t *const src_diff =
+ vp9_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
xd->mi[0].src_mi->bmi[block].as_mode = mode;
vp9_predict_intra_block(xd, block, 1,
const int height = 4 * num_4x4_blocks_high_lookup[plane_bsize];
int idx, idy;
- const uint8_t *const src = &p->src.buf[raster_block_offset(BLOCK_8X8, i,
- p->src.stride)];
- uint8_t *const dst = &pd->dst.buf[raster_block_offset(BLOCK_8X8, i,
- pd->dst.stride)];
+ const uint8_t *const src =
+ &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ uint8_t *const dst = &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i,
+ pd->dst.stride)];
int64_t thisdistortion = 0, thissse = 0;
int thisrate = 0, ref;
const scan_order *so = &vp9_default_scan_orders[TX_4X4];
const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
for (ref = 0; ref < 1 + is_compound; ++ref) {
- const uint8_t *pre = &pd->pre[ref].buf[raster_block_offset(BLOCK_8X8, i,
+ const uint8_t *pre = &pd->pre[ref].buf[vp9_raster_block_offset(BLOCK_8X8, i,
pd->pre[ref].stride)];
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
vp9_highbd_subtract_block(
- height, width, raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8,
- src, p->src.stride, dst, pd->dst.stride, xd->bd);
+ height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride, xd->bd);
} else {
vp9_subtract_block(
- height, width, raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8,
- src, p->src.stride, dst, pd->dst.stride);
+ height, width, vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride);
}
#else
vp9_subtract_block(height, width,
- raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8,
- src, p->src.stride, dst, pd->dst.stride);
+ vp9_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+ 8, src, p->src.stride, dst, pd->dst.stride);
#endif // CONFIG_VP9_HIGHBITDEPTH
k = i;
k += (idy * 2 + idx);
coeff = BLOCK_OFFSET(p->coeff, k);
- x->fwd_txm4x4(raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
+ x->fwd_txm4x4(vp9_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
coeff, 8);
vp9_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
#if CONFIG_VP9_HIGHBITDEPTH
struct macroblock_plane *const p = &x->plane[0];
struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
- p->src.buf = &p->src.buf[raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ p->src.buf = &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i,
+ p->src.stride)];
assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
- pd->pre[0].buf = &pd->pre[0].buf[raster_block_offset(BLOCK_8X8, i,
- pd->pre[0].stride)];
+ pd->pre[0].buf = &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8, i,
+ pd->pre[0].stride)];
if (has_second_ref(mbmi))
- pd->pre[1].buf = &pd->pre[1].buf[raster_block_offset(BLOCK_8X8, i,
- pd->pre[1].stride)];
+ pd->pre[1].buf = &pd->pre[1].buf[vp9_raster_block_offset(BLOCK_8X8, i,
+ pd->pre[1].stride)];
}
static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
// Gets an initial list of candidate vectors from neighbours and orders them
- vp9_find_mv_refs(cm, xd, tile, mi, ref_frame, candidates, mi_row, mi_col);
+ vp9_find_mv_refs(cm, xd, tile, mi, ref_frame, candidates, mi_row, mi_col,
+ NULL, NULL);
// Candidate refinement carried out at encoder and decoder
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
}
}
- if (ref_frame_skip_mask[0] & (1 << ref_frame) &&
- ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))
+ if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
+ (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame))))
continue;
if (mode_skip_mask[ref_frame] & (1 << this_mode))
}
}
- if (ref_frame_skip_mask[0] & (1 << ref_frame) &&
- ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame)))
+ if ((ref_frame_skip_mask[0] & (1 << ref_frame)) &&
+ (ref_frame_skip_mask[1] & (1 << MAX(0, second_ref_frame))))
continue;
// Test best rd so far against threshold for trying this mode.
sf->always_this_block_size = BLOCK_16X16;
sf->search_type_check_frequency = 50;
sf->encode_breakout_thresh = 0;
- sf->elevate_newmv_thresh = 0;
// Recode loop tolerance %.
sf->recode_tolerance = 25;
sf->default_interp_filter = SWITCHABLE;
// enabled in real time mode.
int encode_breakout_thresh;
- // In real time encoding, increase the threshold for NEWMV.
- int elevate_newmv_thresh;
-
// default interp filter choice
INTERP_FILTER default_interp_filter;
int vp9_svc_start_frame(VP9_COMP *const cpi) {
int width = 0, height = 0;
LAYER_CONTEXT *lc;
+ struct lookahead_entry *buf;
int count = 1 << (cpi->svc.number_temporal_layers - 1);
cpi->svc.spatial_layer_id = cpi->svc.spatial_layer_to_encode;
// since its previous frame could be changed during decoding. The idea is to
// put an empty invisible frame in front of them; then we will not use
// prev_mi when encoding these frames.
+
+ buf = vp9_lookahead_peek(cpi->lookahead, 0);
if (cpi->oxcf.error_resilient_mode == 0 && cpi->oxcf.pass == 2 &&
- cpi->svc.encode_empty_frame_state == NEED_TO_ENCODE) {
+ cpi->svc.encode_empty_frame_state == NEED_TO_ENCODE &&
+ lc->rc.frames_to_key != 0 &&
+ !(buf != NULL && (buf->flags & VPX_EFLAG_FORCE_KF))) {
if ((cpi->svc.number_temporal_layers > 1 &&
cpi->svc.temporal_layer_id < cpi->svc.number_temporal_layers - 1) ||
(cpi->svc.number_spatial_layers > 1 &&
#include "vp9/encoder/vp9_encoder.h"
#include "vp9/encoder/vp9_tokenize.h"
-static TOKENVALUE dct_value_tokens[DCT_MAX_VALUE * 2];
-const TOKENVALUE *vp9_dct_value_tokens_ptr;
-static int16_t dct_value_cost[DCT_MAX_VALUE * 2];
-const int16_t *vp9_dct_value_cost_ptr;
-
-#if CONFIG_VP9_HIGHBITDEPTH
-static TOKENVALUE dct_value_tokens_high10[DCT_MAX_VALUE_HIGH10 * 2];
-const TOKENVALUE *vp9_dct_value_tokens_high10_ptr;
-static int16_t dct_value_cost_high10[DCT_MAX_VALUE_HIGH10 * 2];
-const int16_t *vp9_dct_value_cost_high10_ptr;
-
-static TOKENVALUE dct_value_tokens_high12[DCT_MAX_VALUE_HIGH12 * 2];
-const TOKENVALUE *vp9_dct_value_tokens_high12_ptr;
-static int16_t dct_value_cost_high12[DCT_MAX_VALUE_HIGH12 * 2];
-const int16_t *vp9_dct_value_cost_high12_ptr;
-#endif
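+// Token/extra-bit pairs for coefficient values whose magnitude is below
+// CAT6_MIN_VAL. The table is indexed by signed coefficient value through
+// vp9_dct_cat_lt_10_value_tokens, which points at the {0, 0} entry in the
+// middle of the table.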
+static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
+ {9, 63}, {9, 61}, {9, 59}, {9, 57}, {9, 55}, {9, 53}, {9, 51}, {9, 49},
+ {9, 47}, {9, 45}, {9, 43}, {9, 41}, {9, 39}, {9, 37}, {9, 35}, {9, 33},
+ {9, 31}, {9, 29}, {9, 27}, {9, 25}, {9, 23}, {9, 21}, {9, 19}, {9, 17},
+ {9, 15}, {9, 13}, {9, 11}, {9, 9}, {9, 7}, {9, 5}, {9, 3}, {9, 1},
+ {8, 31}, {8, 29}, {8, 27}, {8, 25}, {8, 23}, {8, 21},
+ {8, 19}, {8, 17}, {8, 15}, {8, 13}, {8, 11}, {8, 9},
+ {8, 7}, {8, 5}, {8, 3}, {8, 1},
+ {7, 15}, {7, 13}, {7, 11}, {7, 9}, {7, 7}, {7, 5}, {7, 3}, {7, 1},
+ {6, 7}, {6, 5}, {6, 3}, {6, 1}, {5, 3}, {5, 1},
+ {4, 1}, {3, 1}, {2, 1}, {1, 1}, {0, 0},
+ {1, 0}, {2, 0}, {3, 0}, {4, 0},
+ {5, 0}, {5, 2}, {6, 0}, {6, 2}, {6, 4}, {6, 6},
+ {7, 0}, {7, 2}, {7, 4}, {7, 6}, {7, 8}, {7, 10}, {7, 12}, {7, 14},
+ {8, 0}, {8, 2}, {8, 4}, {8, 6}, {8, 8}, {8, 10}, {8, 12},
+ {8, 14}, {8, 16}, {8, 18}, {8, 20}, {8, 22}, {8, 24},
+ {8, 26}, {8, 28}, {8, 30}, {9, 0}, {9, 2},
+ {9, 4}, {9, 6}, {9, 8}, {9, 10}, {9, 12}, {9, 14}, {9, 16},
+ {9, 18}, {9, 20}, {9, 22}, {9, 24}, {9, 26}, {9, 28},
+ {9, 30}, {9, 32}, {9, 34}, {9, 36}, {9, 38}, {9, 40},
+ {9, 42}, {9, 44}, {9, 46}, {9, 48}, {9, 50}, {9, 52},
+ {9, 54}, {9, 56}, {9, 58}, {9, 60}, {9, 62}
+};
+const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens = dct_cat_lt_10_value_tokens +
+ (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens))
+ / 2;
// Array indices are identical to previously-existing CONTEXT_NODE indices
const vp9_tree_index vp9_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
-CATEGORY5_TOKEN, -CATEGORY6_TOKEN // 7 = CAT_FIVE
};
-static vp9_tree_index cat1[2], cat2[4], cat3[6], cat4[8], cat5[10], cat6[28];
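+// Constant equivalents of the extra-bit trees that were previously built at
+// runtime by init_bit_tree().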
+static const vp9_tree_index cat1[2] = {0, 0};
+static const vp9_tree_index cat2[4] = {2, 2, 0, 0};
+static const vp9_tree_index cat3[6] = {2, 2, 4, 4, 0, 0};
+static const vp9_tree_index cat4[8] = {2, 2, 4, 4, 6, 6, 0, 0};
+static const vp9_tree_index cat5[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
+static const vp9_tree_index cat6[28] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12,
+ 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 0, 0};
+
+static const int16_t zero_cost[] = {0};
+static const int16_t one_cost[] = {255, 257};
+static const int16_t two_cost[] = {255, 257};
+static const int16_t three_cost[] = {255, 257};
+static const int16_t four_cost[] = {255, 257};
+static const int16_t cat1_cost[] = {429, 431, 616, 618};
+static const int16_t cat2_cost[] = {624, 626, 727, 729, 848, 850, 951, 953};
+static const int16_t cat3_cost[] = {
+ 820, 822, 893, 895, 940, 942, 1013, 1015, 1096, 1098, 1169, 1171, 1216, 1218,
+ 1289, 1291
+};
+static const int16_t cat4_cost[] = {
+ 1032, 1034, 1075, 1077, 1105, 1107, 1148, 1150, 1194, 1196, 1237, 1239,
+ 1267, 1269, 1310, 1312, 1328, 1330, 1371, 1373, 1401, 1403, 1444, 1446,
+ 1490, 1492, 1533, 1535, 1563, 1565, 1606, 1608
+};
+static const int16_t cat5_cost[] = {
+ 1269, 1271, 1283, 1285, 1306, 1308, 1320,
+ 1322, 1347, 1349, 1361, 1363, 1384, 1386, 1398, 1400, 1443, 1445, 1457,
+ 1459, 1480, 1482, 1494, 1496, 1521, 1523, 1535, 1537, 1558, 1560, 1572,
+ 1574, 1592, 1594, 1606, 1608, 1629, 1631, 1643, 1645, 1670, 1672, 1684,
+ 1686, 1707, 1709, 1721, 1723, 1766, 1768, 1780, 1782, 1803, 1805, 1817,
+ 1819, 1844, 1846, 1858, 1860, 1881, 1883, 1895, 1897
+};
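+// CAT6 extra-bit costs are split across two lookups: the low 8 bits of the
+// extra bits index vp9_cat6_low_cost, and the remaining high bits index one
+// of the bit-depth specific vp9_cat6_*high_cost tables (see vp9_get_cost()).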
+const int16_t vp9_cat6_low_cost[256] = {
+ 1638, 1640, 1646, 1648, 1652, 1654, 1660, 1662,
+ 1670, 1672, 1678, 1680, 1684, 1686, 1692, 1694, 1711, 1713, 1719, 1721,
+ 1725, 1727, 1733, 1735, 1743, 1745, 1751, 1753, 1757, 1759, 1765, 1767,
+ 1787, 1789, 1795, 1797, 1801, 1803, 1809, 1811, 1819, 1821, 1827, 1829,
+ 1833, 1835, 1841, 1843, 1860, 1862, 1868, 1870, 1874, 1876, 1882, 1884,
+ 1892, 1894, 1900, 1902, 1906, 1908, 1914, 1916, 1940, 1942, 1948, 1950,
+ 1954, 1956, 1962, 1964, 1972, 1974, 1980, 1982, 1986, 1988, 1994, 1996,
+ 2013, 2015, 2021, 2023, 2027, 2029, 2035, 2037, 2045, 2047, 2053, 2055,
+ 2059, 2061, 2067, 2069, 2089, 2091, 2097, 2099, 2103, 2105, 2111, 2113,
+ 2121, 2123, 2129, 2131, 2135, 2137, 2143, 2145, 2162, 2164, 2170, 2172,
+ 2176, 2178, 2184, 2186, 2194, 2196, 2202, 2204, 2208, 2210, 2216, 2218,
+ 2082, 2084, 2090, 2092, 2096, 2098, 2104, 2106, 2114, 2116, 2122, 2124,
+ 2128, 2130, 2136, 2138, 2155, 2157, 2163, 2165, 2169, 2171, 2177, 2179,
+ 2187, 2189, 2195, 2197, 2201, 2203, 2209, 2211, 2231, 2233, 2239, 2241,
+ 2245, 2247, 2253, 2255, 2263, 2265, 2271, 2273, 2277, 2279, 2285, 2287,
+ 2304, 2306, 2312, 2314, 2318, 2320, 2326, 2328, 2336, 2338, 2344, 2346,
+ 2350, 2352, 2358, 2360, 2384, 2386, 2392, 2394, 2398, 2400, 2406, 2408,
+ 2416, 2418, 2424, 2426, 2430, 2432, 2438, 2440, 2457, 2459, 2465, 2467,
+ 2471, 2473, 2479, 2481, 2489, 2491, 2497, 2499, 2503, 2505, 2511, 2513,
+ 2533, 2535, 2541, 2543, 2547, 2549, 2555, 2557, 2565, 2567, 2573, 2575,
+ 2579, 2581, 2587, 2589, 2606, 2608, 2614, 2616, 2620, 2622, 2628, 2630,
+ 2638, 2640, 2646, 2648, 2652, 2654, 2660, 2662
+};
+const int16_t vp9_cat6_high_cost[128] = {
+ 72, 892, 1183, 2003, 1448, 2268, 2559, 3379,
+ 1709, 2529, 2820, 3640, 3085, 3905, 4196, 5016, 2118, 2938, 3229, 4049,
+ 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
+ 2118, 2938, 3229, 4049, 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686,
+ 5131, 5951, 6242, 7062, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
+ 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 2118, 2938, 3229, 4049,
+ 3494, 4314, 4605, 5425, 3755, 4575, 4866, 5686, 5131, 5951, 6242, 7062,
+ 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471, 5801, 6621, 6912, 7732,
+ 7177, 7997, 8288, 9108, 4164, 4984, 5275, 6095, 5540, 6360, 6651, 7471,
+ 5801, 6621, 6912, 7732, 7177, 7997, 8288, 9108, 6210, 7030, 7321, 8141,
+ 7586, 8406, 8697, 9517, 7847, 8667, 8958, 9778, 9223, 10043, 10334, 11154
+};
#if CONFIG_VP9_HIGHBITDEPTH
-static vp9_tree_index cat1_high10[2];
-static vp9_tree_index cat2_high10[4];
-static vp9_tree_index cat3_high10[6];
-static vp9_tree_index cat4_high10[8];
-static vp9_tree_index cat5_high10[10];
-static vp9_tree_index cat6_high10[32];
-static vp9_tree_index cat1_high12[2];
-static vp9_tree_index cat2_high12[4];
-static vp9_tree_index cat3_high12[6];
-static vp9_tree_index cat4_high12[8];
-static vp9_tree_index cat5_high12[10];
-static vp9_tree_index cat6_high12[36];
+const int16_t vp9_cat6_high10_high_cost[512] = {
+ 74, 894, 1185, 2005, 1450, 2270, 2561,
+ 3381, 1711, 2531, 2822, 3642, 3087, 3907, 4198, 5018, 2120, 2940, 3231,
+ 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
+ 7064, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
+ 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
+ 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 2120, 2940, 3231,
+ 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133, 5953, 6244,
+ 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914,
+ 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
+ 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
+ 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
+ 11156, 2120, 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868,
+ 5688, 5133, 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
+ 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277,
+ 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
+ 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
+ 9780, 9225, 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653,
+ 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323,
+ 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336,
+ 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
+ 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
+ 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 2120,
+ 2940, 3231, 4051, 3496, 4316, 4607, 5427, 3757, 4577, 4868, 5688, 5133,
+ 5953, 6244, 7064, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
+ 6623, 6914, 7734, 7179, 7999, 8290, 9110, 4166, 4986, 5277, 6097, 5542,
+ 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212,
+ 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
+ 10045, 10336, 11156, 4166, 4986, 5277, 6097, 5542, 6362, 6653, 7473, 5803,
+ 6623, 6914, 7734, 7179, 7999, 8290, 9110, 6212, 7032, 7323, 8143, 7588,
+ 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 6212,
+ 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960, 9780, 9225,
+ 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454, 10745, 11565,
+ 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 4166, 4986, 5277,
+ 6097, 5542, 6362, 6653, 7473, 5803, 6623, 6914, 7734, 7179, 7999, 8290,
+ 9110, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669, 8960,
+ 9780, 9225, 10045, 10336, 11156, 6212, 7032, 7323, 8143, 7588, 8408, 8699,
+ 9519, 7849, 8669, 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369,
+ 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091,
+ 12382, 13202, 6212, 7032, 7323, 8143, 7588, 8408, 8699, 9519, 7849, 8669,
+ 8960, 9780, 9225, 10045, 10336, 11156, 8258, 9078, 9369, 10189, 9634, 10454,
+ 10745, 11565, 9895, 10715, 11006, 11826, 11271, 12091, 12382, 13202, 8258,
+ 9078, 9369, 10189, 9634, 10454, 10745, 11565, 9895, 10715, 11006, 11826,
+ 11271, 12091, 12382, 13202, 10304, 11124, 11415, 12235, 11680, 12500, 12791,
+ 13611, 11941, 12761, 13052, 13872, 13317, 14137, 14428, 15248,
+};
+const int16_t vp9_cat6_high12_high_cost[2048] = {
+ 76, 896, 1187, 2007, 1452, 2272, 2563,
+ 3383, 1713, 2533, 2824, 3644, 3089, 3909, 4200, 5020, 2122, 2942, 3233,
+ 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
+ 7066, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
+ 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 2122, 2942, 3233,
+ 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246,
+ 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
+ 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 2122, 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870,
+ 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
+ 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655,
+ 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 2122,
+ 2942, 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135,
+ 5955, 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
+ 6625, 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544,
+ 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805,
+ 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
+ 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
+ 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 2122, 2942,
+ 3233, 4053, 3498, 4318, 4609, 5429, 3759, 4579, 4870, 5690, 5135, 5955,
+ 6246, 7066, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
+ 6916, 7736, 7181, 8001, 8292, 9112, 4168, 4988, 5279, 6099, 5544, 6364,
+ 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
+ 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 4168, 4988, 5279, 6099,
+ 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
+ 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521,
+ 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191,
+ 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+ 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 4168, 4988,
+ 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001,
+ 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671,
+ 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080,
+ 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273,
+ 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
+ 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
+ 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
+ 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 6214,
+ 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227,
+ 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
+ 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191,
+ 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+ 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
+ 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682,
+ 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250,
+ 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
+ 15920, 15365, 16185, 16476, 17296, 2122, 2942, 3233, 4053, 3498, 4318, 4609,
+ 5429, 3759, 4579, 4870, 5690, 5135, 5955, 6246, 7066, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
+ 9112, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
+ 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 4168, 4988, 5279,
+ 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916, 7736, 7181, 8001, 8292,
+ 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962,
+ 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371,
+ 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093,
+ 12384, 13204, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625,
+ 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410,
+ 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
+ 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
+ 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
+ 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
+ 13319, 14139, 14430, 15250, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475,
+ 5805, 6625, 6916, 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145,
+ 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158,
+ 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782,
+ 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034,
+ 7325, 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047,
+ 10338, 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897,
+ 10717, 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191,
+ 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384,
+ 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
+ 13054, 13874, 13319, 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
+ 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
+ 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
+ 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
+ 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
+ 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
+ 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
+ 17296, 4168, 4988, 5279, 6099, 5544, 6364, 6655, 7475, 5805, 6625, 6916,
+ 7736, 7181, 8001, 8292, 9112, 6214, 7034, 7325, 8145, 7590, 8410, 8701,
+ 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 6214, 7034, 7325, 8145, 7590,
+ 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636, 10456, 10747,
+ 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126,
+ 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
+ 14139, 14430, 15250, 6214, 7034, 7325, 8145, 7590, 8410, 8701, 9521, 7851,
+ 8671, 8962, 9782, 9227, 10047, 10338, 11158, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008,
+ 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502,
+ 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 8260,
+ 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717, 11008, 11828,
+ 11273, 12093, 12384, 13204, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 10306, 11126,
+ 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319,
+ 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659,
+ 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296, 6214, 7034, 7325,
+ 8145, 7590, 8410, 8701, 9521, 7851, 8671, 8962, 9782, 9227, 10047, 10338,
+ 11158, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567, 9897, 10717,
+ 11008, 11828, 11273, 12093, 12384, 13204, 8260, 9080, 9371, 10191, 9636,
+ 10456, 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204,
+ 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054,
+ 13874, 13319, 14139, 14430, 15250, 8260, 9080, 9371, 10191, 9636, 10456,
+ 10747, 11567, 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306,
+ 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874,
+ 13319, 14139, 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793,
+ 13613, 11943, 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172,
+ 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365,
+ 16185, 16476, 17296, 8260, 9080, 9371, 10191, 9636, 10456, 10747, 11567,
+ 9897, 10717, 11008, 11828, 11273, 12093, 12384, 13204, 10306, 11126, 11417,
+ 12237, 11682, 12502, 12793, 13613, 11943, 12763, 13054, 13874, 13319, 14139,
+ 14430, 15250, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943,
+ 12763, 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283,
+ 13728, 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476,
+ 17296, 10306, 11126, 11417, 12237, 11682, 12502, 12793, 13613, 11943, 12763,
+ 13054, 13874, 13319, 14139, 14430, 15250, 12352, 13172, 13463, 14283, 13728,
+ 14548, 14839, 15659, 13989, 14809, 15100, 15920, 15365, 16185, 16476, 17296,
+ 12352, 13172, 13463, 14283, 13728, 14548, 14839, 15659, 13989, 14809, 15100,
+ 15920, 15365, 16185, 16476, 17296, 14398, 15218, 15509, 16329, 15774, 16594,
+ 16885, 17705, 16035, 16855, 17146, 17966, 17411, 18231, 18522, 19342
+};
#endif
-static void init_bit_tree(vp9_tree_index *p, int n) {
- int i = 0;
-
- while (++i < n) {
- p[0] = p[1] = i << 1;
- p += 2;
- }
-
- p[0] = p[1] = 0;
-}
-
-static void init_bit_trees() {
- init_bit_tree(cat1, 1);
- init_bit_tree(cat2, 2);
- init_bit_tree(cat3, 3);
- init_bit_tree(cat4, 4);
- init_bit_tree(cat5, 5);
- init_bit_tree(cat6, 14);
#if CONFIG_VP9_HIGHBITDEPTH
- init_bit_tree(cat1_high10, 1);
- init_bit_tree(cat2_high10, 2);
- init_bit_tree(cat3_high10, 3);
- init_bit_tree(cat4_high10, 4);
- init_bit_tree(cat5_high10, 5);
- init_bit_tree(cat6_high10, 16);
- init_bit_tree(cat1_high12, 1);
- init_bit_tree(cat2_high12, 2);
- init_bit_tree(cat3_high12, 3);
- init_bit_tree(cat4_high12, 4);
- init_bit_tree(cat5_high12, 5);
- init_bit_tree(cat6_high12, 18);
+static const vp9_tree_index cat1_high10[2] = {0, 0};
+static const vp9_tree_index cat2_high10[4] = {2, 2, 0, 0};
+static const vp9_tree_index cat3_high10[6] = {2, 2, 4, 4, 0, 0};
+static const vp9_tree_index cat4_high10[8] = {2, 2, 4, 4, 6, 6, 0, 0};
+static const vp9_tree_index cat5_high10[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
+static const vp9_tree_index cat6_high10[32] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
+ 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
+ 30, 30, 0, 0};
+static const vp9_tree_index cat1_high12[2] = {0, 0};
+static const vp9_tree_index cat2_high12[4] = {2, 2, 0, 0};
+static const vp9_tree_index cat3_high12[6] = {2, 2, 4, 4, 0, 0};
+static const vp9_tree_index cat4_high12[8] = {2, 2, 4, 4, 6, 6, 0, 0};
+static const vp9_tree_index cat5_high12[10] = {2, 2, 4, 4, 6, 6, 8, 8, 0, 0};
+static const vp9_tree_index cat6_high12[36] = {2, 2, 4, 4, 6, 6, 8, 8, 10, 10,
+ 12, 12, 14, 14, 16, 16, 18, 18, 20, 20, 22, 22, 24, 24, 26, 26, 28, 28,
+ 30, 30, 32, 32, 34, 34, 0, 0};
#endif
-}
const vp9_extra_bit vp9_extra_bits[ENTROPY_TOKENS] = {
- {0, 0, 0, 0}, // ZERO_TOKEN
- {0, 0, 0, 1}, // ONE_TOKEN
- {0, 0, 0, 2}, // TWO_TOKEN
- {0, 0, 0, 3}, // THREE_TOKEN
- {0, 0, 0, 4}, // FOUR_TOKEN
- {cat1, vp9_cat1_prob, 1, CAT1_MIN_VAL}, // CATEGORY1_TOKEN
- {cat2, vp9_cat2_prob, 2, CAT2_MIN_VAL}, // CATEGORY2_TOKEN
- {cat3, vp9_cat3_prob, 3, CAT3_MIN_VAL}, // CATEGORY3_TOKEN
- {cat4, vp9_cat4_prob, 4, CAT4_MIN_VAL}, // CATEGORY4_TOKEN
- {cat5, vp9_cat5_prob, 5, CAT5_MIN_VAL}, // CATEGORY5_TOKEN
- {cat6, vp9_cat6_prob, 14, CAT6_MIN_VAL}, // CATEGORY6_TOKEN
- {0, 0, 0, 0} // EOB_TOKEN
+ {0, 0, 0, 0, zero_cost}, // ZERO_TOKEN
+ {0, 0, 0, 1, one_cost}, // ONE_TOKEN
+ {0, 0, 0, 2, two_cost}, // TWO_TOKEN
+ {0, 0, 0, 3, three_cost}, // THREE_TOKEN
+ {0, 0, 0, 4, four_cost}, // FOUR_TOKEN
+ {cat1, vp9_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost}, // CATEGORY1_TOKEN
+ {cat2, vp9_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost}, // CATEGORY2_TOKEN
+ {cat3, vp9_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost}, // CATEGORY3_TOKEN
+ {cat4, vp9_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost}, // CATEGORY4_TOKEN
+ {cat5, vp9_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost}, // CATEGORY5_TOKEN
+ {cat6, vp9_cat6_prob, 14, CAT6_MIN_VAL, 0}, // CATEGORY6_TOKEN
+ {0, 0, 0, 0, zero_cost} // EOB_TOKEN
};
#if CONFIG_VP9_HIGHBITDEPTH
const vp9_extra_bit vp9_extra_bits_high10[ENTROPY_TOKENS] = {
- {0, 0, 0, 0}, // ZERO_TOKEN
- {0, 0, 0, 1}, // ONE_TOKEN
- {0, 0, 0, 2}, // TWO_TOKEN
- {0, 0, 0, 3}, // THREE_TOKEN
- {0, 0, 0, 4}, // FOUR_TOKEN
- {cat1_high10, vp9_cat1_prob_high10, 1, CAT1_MIN_VAL}, // CATEGORY1_TOKEN
- {cat2_high10, vp9_cat2_prob_high10, 2, CAT2_MIN_VAL}, // CATEGORY2_TOKEN
- {cat3_high10, vp9_cat3_prob_high10, 3, CAT3_MIN_VAL}, // CATEGORY3_TOKEN
- {cat4_high10, vp9_cat4_prob_high10, 4, CAT4_MIN_VAL}, // CATEGORY4_TOKEN
- {cat5_high10, vp9_cat5_prob_high10, 5, CAT5_MIN_VAL}, // CATEGORY5_TOKEN
- {cat6_high10, vp9_cat6_prob_high10, 16, CAT6_MIN_VAL}, // CATEGORY6_TOKEN
- {0, 0, 0, 0} // EOB_TOKEN
+ {0, 0, 0, 0, zero_cost}, // ZERO
+ {0, 0, 0, 1, one_cost}, // ONE
+ {0, 0, 0, 2, two_cost}, // TWO
+ {0, 0, 0, 3, three_cost}, // THREE
+ {0, 0, 0, 4, four_cost}, // FOUR
+ {cat1_high10, vp9_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
+ {cat2_high10, vp9_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
+ {cat3_high10, vp9_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
+ {cat4_high10, vp9_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
+ {cat5_high10, vp9_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
+ {cat6_high10, vp9_cat6_prob_high10, 16, CAT6_MIN_VAL, 0}, // CAT6
+ {0, 0, 0, 0, zero_cost} // EOB
};
const vp9_extra_bit vp9_extra_bits_high12[ENTROPY_TOKENS] = {
- {0, 0, 0, 0}, // ZERO_TOKEN
- {0, 0, 0, 1}, // ONE_TOKEN
- {0, 0, 0, 2}, // TWO_TOKEN
- {0, 0, 0, 3}, // THREE_TOKEN
- {0, 0, 0, 4}, // FOUR_TOKEN
- {cat1_high12, vp9_cat1_prob_high12, 1, CAT1_MIN_VAL}, // CATEGORY1_TOKEN
- {cat2_high12, vp9_cat2_prob_high12, 2, CAT2_MIN_VAL}, // CATEGORY2_TOKEN
- {cat3_high12, vp9_cat3_prob_high12, 3, CAT3_MIN_VAL}, // CATEGORY3_TOKEN
- {cat4_high12, vp9_cat4_prob_high12, 4, CAT4_MIN_VAL}, // CATEGORY4_TOKEN
- {cat5_high12, vp9_cat5_prob_high12, 5, CAT5_MIN_VAL}, // CATEGORY5_TOKEN
- {cat6_high12, vp9_cat6_prob_high12, 18, CAT6_MIN_VAL}, // CATEGORY6_TOKEN
- {0, 0, 0, 0} // EOB_TOKEN
+ {0, 0, 0, 0, zero_cost}, // ZERO
+ {0, 0, 0, 1, one_cost}, // ONE
+ {0, 0, 0, 2, two_cost}, // TWO
+ {0, 0, 0, 3, three_cost}, // THREE
+ {0, 0, 0, 4, four_cost}, // FOUR
+ {cat1_high12, vp9_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost}, // CAT1
+ {cat2_high12, vp9_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost}, // CAT2
+ {cat3_high12, vp9_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost}, // CAT3
+ {cat4_high12, vp9_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost}, // CAT4
+ {cat5_high12, vp9_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost}, // CAT5
+ {cat6_high12, vp9_cat6_prob_high12, 18, CAT6_MIN_VAL, 0}, // CAT6
+ {0, 0, 0, 0, zero_cost} // EOB
};
#endif
-struct vp9_token vp9_coef_encodings[ENTROPY_TOKENS];
-
-void vp9_coef_tree_initialize() {
- init_bit_trees();
- vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree);
-}
-
-static void tokenize_init_one(TOKENVALUE *t, const vp9_extra_bit *const e,
- int16_t *value_cost, int max_value) {
- int i = -max_value;
- int sign = 1;
-
- do {
- if (!i)
- sign = 0;
-
- {
- const int a = sign ? -i : i;
- int eb = sign;
-
- if (a > 4) {
- int j = 4;
-
- while (++j < 11 && e[j].base_val <= a) {}
-
- t[i].token = --j;
- eb |= (a - e[j].base_val) << 1;
- } else {
- t[i].token = a;
- }
- t[i].extra = eb;
- }
-
- // initialize the cost for extra bits for all possible coefficient value.
- {
- int cost = 0;
- const vp9_extra_bit *p = &e[t[i].token];
-
- if (p->base_val) {
- const int extra = t[i].extra;
- const int length = p->len;
-
- if (length)
- cost += treed_cost(p->tree, p->prob, extra >> 1, length);
-
- cost += vp9_cost_bit(vp9_prob_half, extra & 1); /* sign */
- value_cost[i] = cost;
- }
- }
- } while (++i < max_value);
-}
-
-void vp9_tokenize_initialize() {
- vp9_dct_value_tokens_ptr = dct_value_tokens + DCT_MAX_VALUE;
- vp9_dct_value_cost_ptr = dct_value_cost + DCT_MAX_VALUE;
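+// Precomputed {value, length} encodings for the coefficient tokens, replacing
+// the former runtime call to
+// vp9_tokens_from_tree(vp9_coef_encodings, vp9_coef_tree).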
+const struct vp9_token vp9_coef_encodings[ENTROPY_TOKENS] = {
+ {2, 2}, {6, 3}, {28, 5}, {58, 6}, {59, 6}, {60, 6}, {61, 6}, {124, 7},
+ {125, 7}, {126, 7}, {127, 7}, {0, 1}
+};
- tokenize_init_one(dct_value_tokens + DCT_MAX_VALUE, vp9_extra_bits,
- dct_value_cost + DCT_MAX_VALUE, DCT_MAX_VALUE);
-#if CONFIG_VP9_HIGHBITDEPTH
- vp9_dct_value_tokens_high10_ptr = dct_value_tokens_high10 +
- DCT_MAX_VALUE_HIGH10;
- vp9_dct_value_cost_high10_ptr = dct_value_cost_high10 + DCT_MAX_VALUE_HIGH10;
-
- tokenize_init_one(dct_value_tokens_high10 + DCT_MAX_VALUE_HIGH10,
- vp9_extra_bits_high10,
- dct_value_cost_high10 + DCT_MAX_VALUE_HIGH10,
- DCT_MAX_VALUE_HIGH10);
- vp9_dct_value_tokens_high12_ptr = dct_value_tokens_high12 +
- DCT_MAX_VALUE_HIGH12;
- vp9_dct_value_cost_high12_ptr = dct_value_cost_high12 + DCT_MAX_VALUE_HIGH12;
-
- tokenize_init_one(dct_value_tokens_high12 + DCT_MAX_VALUE_HIGH12,
- vp9_extra_bits_high12,
- dct_value_cost_high12 + DCT_MAX_VALUE_HIGH12,
- DCT_MAX_VALUE_HIGH12);
-#endif
-}
struct tokenize_b_args {
VP9_COMP *cpi;
td->counts->eob_branch[tx_size][type][ref];
const uint8_t *const band = get_band_translate(tx_size);
const int seg_eob = get_tx_eob(&cpi->common.seg, segment_id, tx_size);
- const TOKENVALUE *dct_value_tokens;
-
+ int16_t token;
+ EXTRABIT extra;
int aoff, loff;
txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &aoff, &loff);
scan = so->scan;
nb = so->neighbors;
c = 0;
-#if CONFIG_VP9_HIGHBITDEPTH
- if (cpi->common.profile >= PROFILE_2) {
- dct_value_tokens = (cpi->common.bit_depth == VPX_BITS_10 ?
- vp9_dct_value_tokens_high10_ptr :
- vp9_dct_value_tokens_high12_ptr);
- } else {
- dct_value_tokens = vp9_dct_value_tokens_ptr;
- }
-#else
- dct_value_tokens = vp9_dct_value_tokens_ptr;
-#endif
while (c < eob) {
int v = 0;
v = qcoeff[scan[c]];
}
- add_token(&t, coef_probs[band[c]][pt],
- dct_value_tokens[v].extra,
- (uint8_t)dct_value_tokens[v].token,
- (uint8_t)skip_eob,
- counts[band[c]][pt]);
+ vp9_get_token_extra(v, &token, &extra);
+
+ add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
+ (uint8_t)skip_eob, counts[band[c]][pt]);
eob_branch[band[c]][pt] += !skip_eob;
- token_cache[scan[c]] = vp9_pt_energy_class[dct_value_tokens[v].token];
+ token_cache[scan[c]] = vp9_pt_energy_class[token];
++c;
pt = get_coef_context(nb, token_cache, c);
}
extern "C" {
#endif
-void vp9_tokenize_initialize();
-
#define EOSB_TOKEN 127 // Not signalled, encoder only
-typedef struct {
- int16_t token;
#if CONFIG_VP9_HIGHBITDEPTH
- int32_t extra;
+ typedef int32_t EXTRABIT;
#else
- int16_t extra;
+ typedef int16_t EXTRABIT;
#endif
+
+
+typedef struct {
+ int16_t token;
+ EXTRABIT extra;
} TOKENVALUE;
typedef struct {
const vp9_prob *context_tree;
-#if CONFIG_VP9_HIGHBITDEPTH
- int32_t extra;
-#else
- int16_t extra;
-#endif
- uint8_t token;
- uint8_t skip_eob_node;
+ EXTRABIT extra;
+ uint8_t token;
+ uint8_t skip_eob_node;
} TOKENEXTRA;
extern const vp9_tree_index vp9_coef_tree[];
extern const vp9_tree_index vp9_coef_con_tree[];
-extern struct vp9_token vp9_coef_encodings[];
+extern const struct vp9_token vp9_coef_encodings[];
int vp9_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
int vp9_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
* fields are not.
*/
extern const TOKENVALUE *vp9_dct_value_tokens_ptr;
+extern const TOKENVALUE *vp9_dct_cat_lt_10_value_tokens;
+extern const int16_t vp9_cat6_low_cost[256];
+extern const int16_t vp9_cat6_high_cost[128];
+extern const int16_t vp9_cat6_high10_high_cost[512];
+extern const int16_t vp9_cat6_high12_high_cost[2048];
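+// Returns the cost of a token's extra bits. Tokens below CAT6 use their small
+// per-token cost table; CAT6 combines a low-byte lookup with the bit-depth
+// dependent high table supplied by the caller.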
+static INLINE int16_t vp9_get_cost(int16_t token, EXTRABIT extrabits,
+ const int16_t *cat6_high_table) {
+ if (token != CATEGORY6_TOKEN)
+ return vp9_extra_bits[token].cost[extrabits];
+ return vp9_cat6_low_cost[extrabits & 0xff]
+ + cat6_high_table[extrabits >> 8];
+}
+
#if CONFIG_VP9_HIGHBITDEPTH
-extern const int16_t *vp9_dct_value_cost_high10_ptr;
-extern const TOKENVALUE *vp9_dct_value_tokens_high10_ptr;
-extern const int16_t *vp9_dct_value_cost_high12_ptr;
-extern const TOKENVALUE *vp9_dct_value_tokens_high12_ptr;
+static INLINE const int16_t* vp9_get_high_cost_table(int bit_depth) {
+ return bit_depth == 8 ? vp9_cat6_high_cost
+ : (bit_depth == 10 ? vp9_cat6_high10_high_cost :
+ vp9_cat6_high12_high_cost);
+}
+#else
+static INLINE const int16_t* vp9_get_high_cost_table(int bit_depth) {
+ (void) bit_depth;
+ return vp9_cat6_high_cost;
+}
#endif // CONFIG_VP9_HIGHBITDEPTH
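+// Maps a coefficient value to its token and extra bits. The extra bits pack
+// the offset from the category base value in the upper bits and the sign in
+// bit 0, which is the layout vp9_get_cost() expects.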
+static INLINE void vp9_get_token_extra(int v, int16_t *token, EXTRABIT *extra) {
+ if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
+ *token = CATEGORY6_TOKEN;
+ if (v >= CAT6_MIN_VAL)
+ *extra = 2 * v - 2 * CAT6_MIN_VAL;
+ else
+ *extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
+ return;
+ }
+ *token = vp9_dct_cat_lt_10_value_tokens[v].token;
+ *extra = vp9_dct_cat_lt_10_value_tokens[v].extra;
+}
+static INLINE int16_t vp9_get_token(int v) {
+ if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL)
+ return 10;
+ return vp9_dct_cat_lt_10_value_tokens[v].token;
+}
+
+
#ifdef __cplusplus
} // extern "C"
#endif
const int16_t* round_ptr, const int16_t* quant_ptr,
const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- int zbin_oq_value, uint16_t* eob_ptr,
+ uint16_t* eob_ptr,
const int16_t* scan_ptr,
const int16_t* iscan_ptr) {
__m128i zero;
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)coeff_ptr;
// Pre-condition input (shift by two)
const int16_t* quant_shift_ptr,
int16_t* qcoeff_ptr,
int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- int zbin_oq_value, uint16_t* eob_ptr,
+ uint16_t* eob_ptr,
const int16_t* scan_ptr,
const int16_t* iscan_ptr) {
__m128i zero;
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
(void)coeff_ptr;
// Pre-condition input (shift by two)
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value,
uint16_t *eob_ptr,
const int16_t *scan,
const int16_t *iscan) {
__m128i zbins[2];
__m128i nzbins[2];
- zbins[0] = _mm_set_epi32((int)(zbin_ptr[1] + zbin_oq_value),
- (int)(zbin_ptr[1] + zbin_oq_value),
- (int)(zbin_ptr[1] + zbin_oq_value),
- (int)(zbin_ptr[0] + zbin_oq_value));
- zbins[1] = _mm_set1_epi32((int)(zbin_ptr[1] + zbin_oq_value));
+ zbins[0] = _mm_set_epi32((int)zbin_ptr[1],
+ (int)zbin_ptr[1],
+ (int)zbin_ptr[1],
+ (int)zbin_ptr[0]);
+ zbins[1] = _mm_set1_epi32((int)zbin_ptr[1]);
nzbins[0] = _mm_setzero_si128();
nzbins[1] = _mm_setzero_si128();
tran_low_t *qcoeff_ptr,
tran_low_t *dqcoeff_ptr,
const int16_t *dequant_ptr,
- int zbin_oq_value,
uint16_t *eob_ptr,
const int16_t *scan,
const int16_t *iscan) {
int idx = 0;
int idx_arr[1024];
int i, eob = -1;
- const int zbin0_tmp = ROUND_POWER_OF_TWO(zbin_ptr[0] + zbin_oq_value, 1);
- const int zbin1_tmp = ROUND_POWER_OF_TWO(zbin_ptr[1] + zbin_oq_value, 1);
+ const int zbin0_tmp = ROUND_POWER_OF_TWO(zbin_ptr[0], 1);
+ const int zbin1_tmp = ROUND_POWER_OF_TWO(zbin_ptr[1], 1);
(void)scan;
- zbins[0] = _mm_set_epi32((zbin1_tmp + zbin_oq_value),
- (zbin1_tmp + zbin_oq_value),
- (zbin1_tmp + zbin_oq_value),
- (zbin0_tmp + zbin_oq_value));
- zbins[1] = _mm_set1_epi32((zbin1_tmp + zbin_oq_value));
+ zbins[0] = _mm_set_epi32(zbin1_tmp,
+ zbin1_tmp,
+ zbin1_tmp,
+ zbin0_tmp);
+ zbins[1] = _mm_set1_epi32(zbin1_tmp);
nzbins[0] = _mm_setzero_si128();
nzbins[1] = _mm_setzero_si128();
const int16_t* round_ptr, const int16_t* quant_ptr,
const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- int zbin_oq_value, uint16_t* eob_ptr,
+ uint16_t* eob_ptr,
const int16_t* scan_ptr,
const int16_t* iscan_ptr) {
__m128i zero;
// Setup global values
{
- __m128i zbin_oq;
__m128i pw_1;
- zbin_oq = _mm_set1_epi16(zbin_oq_value);
zbin = _mm_load_si128((const __m128i*)zbin_ptr);
round = _mm_load_si128((const __m128i*)round_ptr);
quant = _mm_load_si128((const __m128i*)quant_ptr);
- zbin = _mm_add_epi16(zbin, zbin_oq);
pw_1 = _mm_set1_epi16(1);
zbin = _mm_sub_epi16(zbin, pw_1);
dequant = _mm_load_si128((const __m128i*)dequant_ptr);
const int16_t* round_ptr, const int16_t* quant_ptr,
const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
int16_t* dqcoeff_ptr, const int16_t* dequant_ptr,
- int zbin_oq_value, uint16_t* eob_ptr,
+ uint16_t* eob_ptr,
const int16_t* scan_ptr,
const int16_t* iscan_ptr) {
__m128i zero;
(void)scan_ptr;
(void)zbin_ptr;
(void)quant_shift_ptr;
- (void)zbin_oq_value;
coeff_ptr += n_coeffs;
iscan_ptr += n_coeffs;
%macro QUANTIZE_FN 2
cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
- shift, qcoeff, dqcoeff, dequant, zbin_oq, \
+ shift, qcoeff, dqcoeff, dequant, \
eob, scan, iscan
cmp dword skipm, 0
jne .blank
movifnidn zbinq, zbinmp
movifnidn roundq, roundmp
movifnidn quantq, quantmp
- movd m4, dword zbin_oqm ; m4 = zbin_oq
mova m0, [zbinq] ; m0 = zbin
- punpcklwd m4, m4
mova m1, [roundq] ; m1 = round
- pshufd m4, m4, 0
mova m2, [quantq] ; m2 = quant
- paddw m0, m4 ; m0 = zbin + zbin_oq
%ifidn %1, b_32x32
pcmpeqw m5, m5
psrlw m5, 15
psllw m4, 1
%endif
pxor m5, m5 ; m5 = dedicated zero
- DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, d6, eob
+ DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
lea coeffq, [ coeffq+ncoeffq*2]
lea iscanq, [ iscanq+ncoeffq*2]
lea qcoeffq, [ qcoeffq+ncoeffq*2]
%macro QUANTIZE_FP 2
cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
- shift, qcoeff, dqcoeff, dequant, zbin_oq, \
+ shift, qcoeff, dqcoeff, dequant, \
eob, scan, iscan
cmp dword skipm, 0
jne .blank
psllw m2, 1
%endif
pxor m5, m5 ; m5 = dedicated zero
- DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, d6, eob
+ DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
lea coeffq, [ coeffq+ncoeffq*2]
lea iscanq, [ iscanq+ncoeffq*2]
lea qcoeffq, [ qcoeffq+ncoeffq*2]
return *sse;
}
+// The two unused parameters are placeholders for PIC-enabled builds.
#define DECL(w, opt) \
int vp9_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \
ptrdiff_t src_stride, \
int x_offset, int y_offset, \
const uint8_t *dst, \
ptrdiff_t dst_stride, \
- int height, unsigned int *sse)
+ int height, unsigned int *sse, \
+ void *unused0, void *unused)
#define DECLS(opt1, opt2) \
DECL(4, opt2); \
DECL(8, opt1); \
unsigned int sse; \
int se = vp9_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
y_offset, dst, dst_stride, \
- h, &sse); \
+ h, &sse, NULL, NULL); \
if (w > wf) { \
unsigned int sse2; \
int se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 16, src_stride, \
x_offset, y_offset, \
dst + 16, dst_stride, \
- h, &sse2); \
+ h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
x_offset, y_offset, \
dst + 32, dst_stride, \
- h, &sse2); \
+ h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \
x_offset, y_offset, \
dst + 48, dst_stride, \
- h, &sse2); \
+ h, &sse2, NULL, NULL); \
se += se2; \
sse += sse2; \
} \
#undef FNS
#undef FN
+// The two unused parameters are placeholders for PIC-enabled builds.
#define DECL(w, opt) \
int vp9_sub_pixel_avg_variance##w##xh_##opt(const uint8_t *src, \
ptrdiff_t src_stride, \
ptrdiff_t dst_stride, \
const uint8_t *sec, \
ptrdiff_t sec_stride, \
- int height, unsigned int *sse)
+ int height, unsigned int *sse, \
+ void *unused0, void *unused)
#define DECLS(opt1, opt2) \
DECL(4, opt2); \
DECL(8, opt1); \
unsigned int sse; \
int se = vp9_sub_pixel_avg_variance##wf##xh_##opt(src, src_stride, x_offset, \
y_offset, dst, dst_stride, \
- sec, w, h, &sse); \
+ sec, w, h, &sse, NULL, \
+ NULL); \
if (w > wf) { \
unsigned int sse2; \
int se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 16, src_stride, \
x_offset, y_offset, \
dst + 16, dst_stride, \
- sec + 16, w, h, &sse2); \
+ sec + 16, w, h, &sse2, \
+ NULL, NULL); \
se += se2; \
sse += sse2; \
if (w > wf * 2) { \
se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 32, src_stride, \
x_offset, y_offset, \
dst + 32, dst_stride, \
- sec + 32, w, h, &sse2); \
+ sec + 32, w, h, &sse2, \
+ NULL, NULL); \
se += se2; \
sse += sse2; \
se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 48, src_stride, \
x_offset, y_offset, \
dst + 48, dst_stride, \
- sec + 48, w, h, &sse2); \
+ sec + 48, w, h, &sse2, \
+ NULL, NULL); \
se += se2; \
sse += sse2; \
} \
VP9_COMMON_SRCS-yes += common/vp9_enums.h
VP9_COMMON_SRCS-yes += common/vp9_idct.h
VP9_COMMON_SRCS-yes += common/vp9_loopfilter.h
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter_thread.h
VP9_COMMON_SRCS-yes += common/vp9_mv.h
VP9_COMMON_SRCS-yes += common/vp9_onyxc_int.h
VP9_COMMON_SRCS-yes += common/vp9_pred_common.h
VP9_COMMON_SRCS-yes += common/vp9_tile_common.c
VP9_COMMON_SRCS-yes += common/vp9_loopfilter.c
VP9_COMMON_SRCS-yes += common/vp9_loopfilter_filters.c
+VP9_COMMON_SRCS-yes += common/vp9_loopfilter_thread.c
VP9_COMMON_SRCS-yes += common/vp9_mvref_common.c
VP9_COMMON_SRCS-yes += common/vp9_mvref_common.h
VP9_COMMON_SRCS-yes += common/vp9_quant_common.c
VP9_COMMON_SRCS-$(HAVE_AVX2) += common/x86/vp9_subpixel_8t_intrin_avx2.c
VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_subpixel_8t_intrin_ssse3.c
ifeq ($(CONFIG_VP9_POSTPROC),yes)
+VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_mfqe_sse2.asm
VP9_COMMON_SRCS-$(HAVE_SSE2) += common/x86/vp9_postproc_sse2.asm
endif
VP9_COMMON_SRCS-$(HAVE_SSSE3) += common/x86/vp9_idct_ssse3_x86_64.asm
endif
+VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_16_neon_asm$(ASM)
+VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_loopfilter_8_neon_asm$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_mb_lpf_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON_ASM) += common/arm/neon/vp9_save_reg_neon$(ASM)
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht4x4_add_neon.c
VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_iht8x8_add_neon.c
+VP9_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/vp9_loopfilter_neon.c
# neon with assembly and intrinsics implementations. If both are available
# prefer assembly.
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_add_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_add_neon_asm$(ASM)
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon_asm$(ASM)
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon_asm$(ASM)
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_4_neon_asm$(ASM)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_reconintra_neon_asm$(ASM)
else
ifeq ($(HAVE_NEON), yes)
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct4x4_add_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_1_add_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_idct8x8_add_neon.c
-VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_16_neon.c
+VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_4_neon.c
+# TODO(johannkoenig): re-enable when chromium build is fixed
+# https://code.google.com/p/chromium/issues/detail?id=443839
+#VP9_COMMON_SRCS-yes += common/arm/neon/vp9_loopfilter_8_neon.c
VP9_COMMON_SRCS-yes += common/arm/neon/vp9_reconintra_neon.c
endif # HAVE_NEON
endif # HAVE_NEON_ASM
#include "./vpx_config.h"
#include "vpx/vpx_codec.h"
+#include "vpx_ports/vpx_once.h"
#include "vpx/internal/vpx_codec_internal.h"
#include "./vpx_version.h"
#include "vp9/encoder/vp9_encoder.h"
unsigned int frame_periodic_boost;
vpx_bit_depth_t bit_depth;
vp9e_tune_content content;
+ vpx_color_space_t color_space;
};
static struct vp9_extracfg default_extra_cfg = {
NO_AQ, // aq_mode
0, // frame_periodic_delta_q
VPX_BITS_8, // Bit depth
- VP9E_CONTENT_DEFAULT // content
+ VP9E_CONTENT_DEFAULT, // content
+ VPX_CS_UNKNOWN, // color space
};
struct vpx_codec_alg_priv {
size_t pending_frame_sizes[8];
size_t pending_frame_magnitude;
vpx_image_t preview_img;
+ vpx_enc_frame_flags_t next_frame_flags;
vp8_postproc_cfg_t preview_ppcfg;
vpx_codec_pkt_list_decl(256) pkt_list;
unsigned int fixed_kf_cntr;
vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb;
+ // BufferPool that holds all reference frames.
+ BufferPool *buffer_pool;
};
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
cfg->g_bit_depth == VPX_BITS_8) {
ERROR("Codec bit-depth 8 not supported in profile > 1");
}
-
+ RANGE_CHECK(extra_cfg, color_space, VPX_CS_UNKNOWN, VPX_CS_SRGB);
return VPX_CODEC_OK;
}
}
static vpx_codec_err_t set_encoder_config(
- VP9EncoderConfig *oxcf,
- const vpx_codec_enc_cfg_t *cfg,
- const struct vp9_extracfg *extra_cfg) {
+ VP9EncoderConfig *oxcf,
+ const vpx_codec_enc_cfg_t *cfg,
+ const struct vp9_extracfg *extra_cfg) {
const int is_vbr = cfg->rc_end_usage == VPX_VBR;
oxcf->profile = cfg->g_profile;
oxcf->max_threads = (int)cfg->g_threads;
oxcf->firstpass_mb_stats_in = cfg->rc_firstpass_mb_stats_in;
#endif
+ oxcf->color_space = extra_cfg->color_space;
oxcf->arnr_max_frames = extra_cfg->arnr_max_frames;
oxcf->arnr_strength = extra_cfg->arnr_strength;
static vpx_codec_err_t encoder_set_config(vpx_codec_alg_priv_t *ctx,
const vpx_codec_enc_cfg_t *cfg) {
vpx_codec_err_t res;
-
- if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h)
- ERROR("Cannot change width or height after initialization");
+ int force_key = 0;
+
+ if (cfg->g_w != ctx->cfg.g_w || cfg->g_h != ctx->cfg.g_h) {
+ if (cfg->g_lag_in_frames > 1 || cfg->g_pass != VPX_RC_ONE_PASS)
+ ERROR("Cannot change width or height after initialization");
+ if (!valid_ref_frame_size(ctx->cfg.g_w, ctx->cfg.g_h, cfg->g_w, cfg->g_h) ||
+ (ctx->cpi->initial_width && (int)cfg->g_w > ctx->cpi->initial_width) ||
+ (ctx->cpi->initial_height && (int)cfg->g_h > ctx->cpi->initial_height))
+ force_key = 1;
+ }
// Prevent increasing lag_in_frames. This check is stricter than it needs
// to be -- the limit is not increasing past the first lag_in_frames
vp9_change_config(ctx->cpi, &ctx->oxcf);
}
+ if (force_key)
+ ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
+
return res;
}
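The resize path above pairs with the public encoder-config API. Here is a minimal sketch (not part of the patch) of how an application might request a new frame size on a running one-pass encoder; `codec` and `cfg` are assumed to be the already-initialized context and its original configuration.

#include <stdio.h>
#include "vpx/vpx_encoder.h"

// Illustrative sketch only: request a new frame size on a running one-pass
// encoder. `codec` must already be initialized and `cfg` must be the
// configuration it was initialized with.
static void request_resize(vpx_codec_ctx_t *codec, vpx_codec_enc_cfg_t cfg,
                           unsigned int new_w, unsigned int new_h) {
  cfg.g_w = new_w;
  cfg.g_h = new_h;
  if (vpx_codec_enc_config_set(codec, &cfg) != VPX_CODEC_OK) {
    // Two-pass or lagged encodes still reject mid-stream resizing.
    fprintf(stderr, "resize failed: %s\n", vpx_codec_error(codec));
  }
  // When the new size cannot be coded predictively from the old one, the
  // encoder forces the next frame to be a keyframe (the force_key path above).
}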
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
+ priv->buffer_pool =
+ (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+ if (priv->buffer_pool == NULL)
+ return VPX_CODEC_MEM_ERROR;
if (ctx->config.enc) {
// Update the reference to the config structure to an internal copy.
}
priv->extra_cfg = default_extra_cfg;
- vp9_initialize_enc();
+ once(vp9_initialize_enc);
res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
priv->oxcf.use_highbitdepth =
(ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
#endif
- priv->cpi = vp9_create_compressor(&priv->oxcf);
+ priv->cpi = vp9_create_compressor(&priv->oxcf, priv->buffer_pool);
if (priv->cpi == NULL)
res = VPX_CODEC_MEM_ERROR;
else
static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
vp9_remove_compressor(ctx->cpi);
+ vpx_free(ctx->buffer_pool);
vpx_free(ctx);
return VPX_CODEC_OK;
}
  // Store the original flags into the frame buffer. Will extract the
// key frame flag when we actually encode this frame.
- if (vp9_receive_raw_frame(cpi, flags,
+ if (vp9_receive_raw_frame(cpi, flags | ctx->next_frame_flags,
&sd, dst_time_stamp, dst_end_time_stamp)) {
res = update_error_state(ctx, &cpi->common.error);
}
+ ctx->next_frame_flags = 0;
}
cx_data = ctx->cx_data;
return update_extra_cfg(ctx, &extra_cfg);
}
+static vpx_codec_err_t ctrl_set_color_space(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ struct vp9_extracfg extra_cfg = ctx->extra_cfg;
+ extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args);
+ return update_extra_cfg(ctx, &extra_cfg);
+}
+
static vpx_codec_ctrl_fn_map_t encoder_ctrl_maps[] = {
{VP8_COPY_REFERENCE, ctrl_copy_reference},
{VP8E_UPD_ENTROPY, ctrl_update_entropy},
{VP9E_REGISTER_CX_CALLBACK, ctrl_register_cx_callback},
{VP9E_SET_SVC_LAYER_ID, ctrl_set_svc_layer_id},
{VP9E_SET_TUNE_CONTENT, ctrl_set_tune_content},
+ {VP9E_SET_COLOR_SPACE, ctrl_set_color_space},
{VP9E_SET_NOISE_SENSITIVITY, ctrl_set_noise_sensitivity},
// Getters
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
+#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_frame_buffers.h"
+#include "vp9/common/vp9_thread.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_decodeframe.h"
typedef vpx_codec_stream_info_t vp9_stream_info_t;
+// This limit is due to the number of frame buffers.
+// TODO(hkuang): Remove this limit after implementing on-demand frame buffers.
+#define FRAME_CACHE_SIZE 6  // Cache at most 6 decoded frames.
+
+typedef struct cache_frame {
+ int fb_idx;
+ vpx_image_t img;
+} cache_frame;
+
struct vpx_codec_alg_priv {
vpx_codec_priv_t base;
vpx_codec_dec_cfg_t cfg;
vp9_stream_info_t si;
- struct VP9Decoder *pbi;
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
- void *decrypt_state;
+ void *decrypt_state;
vpx_image_t img;
int img_avail;
int flushed;
int invert_tile_order;
+ int last_show_frame; // Index of last output frame.
+
+ // Frame parallel related.
int frame_parallel_decode; // frame-based threading.
int byte_alignment;
+ VP9Worker *frame_workers;
+ int num_frame_workers;
+ int next_submit_worker_id;
+ int last_submit_worker_id;
+ int next_output_worker_id;
+ int available_threads;
+ cache_frame frame_cache[FRAME_CACHE_SIZE];
+ int frame_cache_write;
+ int frame_cache_read;
+ int num_cache_frames;
+
+ // BufferPool that holds all reference frames. Shared by all the FrameWorkers.
+ BufferPool *buffer_pool;
// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
-
priv->si.sz = sizeof(priv->si);
priv->flushed = 0;
+ // Only do frame parallel decode when threads > 1.
priv->frame_parallel_decode =
- (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING);
- priv->frame_parallel_decode = 0; // Disable for now
-
+ (ctx->config.dec && (ctx->config.dec->threads > 1) &&
+ (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
if (ctx->config.dec) {
priv->cfg = *ctx->config.dec;
ctx->config.dec = &priv->cfg;
}
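Frame-parallel decoding is now taken only when the application both passes VPX_CODEC_USE_FRAME_THREADING and configures more than one thread. A minimal initialization sketch (illustrative, not part of the patch):

#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

// Illustrative sketch: initialize a VP9 decoder so that the new
// frame-parallel path is selected (threads > 1 plus the frame-threading flag).
static vpx_codec_err_t open_frame_parallel_decoder(vpx_codec_ctx_t *decoder) {
  vpx_codec_dec_cfg_t cfg = {0};
  cfg.threads = 4;  // more than one thread is required for frame parallelism
  return vpx_codec_dec_init(decoder, vpx_codec_vp9_dx(), &cfg,
                            VPX_CODEC_USE_FRAME_THREADING);
}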
static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
- if (ctx->pbi) {
- vp9_decoder_remove(ctx->pbi);
- ctx->pbi = NULL;
+ if (ctx->frame_workers != NULL) {
+ int i;
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ vp9_get_worker_interface()->end(worker);
+ vp9_remove_common(&frame_worker_data->pbi->common);
+ vp9_decoder_remove(frame_worker_data->pbi);
+ vpx_free(frame_worker_data->scratch_buffer);
+#if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&frame_worker_data->stats_mutex);
+ pthread_cond_destroy(&frame_worker_data->stats_cond);
+#endif
+ vpx_free(frame_worker_data);
+ }
+#if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
+#endif
}
- vpx_free(ctx);
+ if (ctx->buffer_pool)
+ vp9_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+ vpx_free(ctx->frame_workers);
+ vpx_free(ctx->buffer_pool);
+ vpx_free(ctx);
return VPX_CODEC_OK;
}
static int parse_bitdepth_colorspace_sampling(
BITSTREAM_PROFILE profile, struct vp9_read_bit_buffer *rb) {
- const int sRGB = 7;
- int colorspace;
+ vpx_color_space_t color_space;
if (profile >= PROFILE_2)
rb->bit_offset += 1; // Bit-depth 10 or 12.
- colorspace = vp9_rb_read_literal(rb, 3);
- if (colorspace != sRGB) {
+ color_space = (vpx_color_space_t)vp9_rb_read_literal(rb, 3);
+ if (color_space != VPX_CS_SRGB) {
rb->bit_offset += 1; // [16,235] (including xvycc) vs [0,255] range.
if (profile == PROFILE_1 || profile == PROFILE_3) {
rb->bit_offset += 2; // subsampling x/y.
if (frame_marker != VP9_FRAME_MARKER)
return VPX_CODEC_UNSUP_BITSTREAM;
- if (profile >= MAX_PROFILES) return VPX_CODEC_UNSUP_BITSTREAM;
+ if (profile >= MAX_PROFILES)
+ return VPX_CODEC_UNSUP_BITSTREAM;
+
+ if ((profile >= 2 && data_sz <= 1) || data_sz < 1)
+ return VPX_CODEC_UNSUP_BITSTREAM;
if (vp9_rb_read_bit(&rb)) { // show an existing frame
vp9_rb_read_literal(&rb, 3); // Frame buffer to show.
return VPX_CODEC_OK;
}
+static void set_error_detail(vpx_codec_alg_priv_t *ctx,
+ const char *const error) {
+ ctx->base.err_detail = error;
+}
+
static vpx_codec_err_t update_error_state(vpx_codec_alg_priv_t *ctx,
const struct vpx_internal_error_info *error) {
if (error->error_code)
- ctx->base.err_detail = error->has_detail ? error->detail : NULL;
+ set_error_detail(ctx, error->has_detail ? error->detail : NULL);
return error->error_code;
}
static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
- VP9_COMMON *const cm = &ctx->pbi->common;
+ int i;
- cm->new_fb_idx = -1;
- cm->byte_alignment = ctx->byte_alignment;
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
- if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
- cm->get_fb_cb = ctx->get_ext_fb_cb;
- cm->release_fb_cb = ctx->release_ext_fb_cb;
- cm->cb_priv = ctx->ext_priv;
- } else {
- cm->get_fb_cb = vp9_get_frame_buffer;
- cm->release_fb_cb = vp9_release_frame_buffer;
+ cm->new_fb_idx = -1;
+ cm->byte_alignment = ctx->byte_alignment;
+
+ if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
+ pool->get_fb_cb = ctx->get_ext_fb_cb;
+ pool->release_fb_cb = ctx->release_ext_fb_cb;
+ pool->cb_priv = ctx->ext_priv;
+ } else {
+ pool->get_fb_cb = vp9_get_frame_buffer;
+ pool->release_fb_cb = vp9_release_frame_buffer;
- if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
- "Failed to initialize internal frame buffers");
+ if (vp9_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to initialize internal frame buffers");
- cm->cb_priv = &cm->int_frame_buffers;
+ pool->cb_priv = &pool->int_frame_buffers;
+ }
}
}
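The per-worker buffer pools above still honor application-supplied frame buffers. A rough sketch of registering external buffers (not part of the patch; the callback names are hypothetical), which must happen before the first decode call since the workers copy the callbacks into their pools there:

#include <stdlib.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"

// Hypothetical callbacks for illustration only.
static int my_get_fb(void *priv, size_t min_size,
                     vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  fb->data = (uint8_t *)calloc(1, min_size);
  fb->size = min_size;
  return fb->data ? 0 : -1;
}

static int my_release_fb(void *priv, vpx_codec_frame_buffer_t *fb) {
  (void)priv;
  free(fb->data);
  fb->data = NULL;
  return 0;
}

static vpx_codec_err_t use_external_buffers(vpx_codec_ctx_t *decoder) {
  return vpx_codec_set_frame_buffer_functions(decoder, my_get_fb,
                                              my_release_fb, NULL);
}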
flags->noise_level = ctx->postproc_cfg.noise_level;
}
-static void init_decoder(vpx_codec_alg_priv_t *ctx) {
- ctx->pbi = vp9_decoder_create();
- if (ctx->pbi == NULL)
- return;
+static int frame_worker_hook(void *arg1, void *arg2) {
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)arg1;
+ const uint8_t *data = frame_worker_data->data;
+ (void)arg2;
+
+ frame_worker_data->result =
+ vp9_receive_compressed_data(frame_worker_data->pbi,
+ frame_worker_data->data_size,
+ &data);
+ frame_worker_data->data_end = data;
+
+ if (frame_worker_data->pbi->frame_parallel_decode) {
+ // In frame parallel decoding, a worker thread must successfully decode all
+ // the compressed data.
+ if (frame_worker_data->result != 0 ||
+ frame_worker_data->data + frame_worker_data->data_size - 1 > data) {
+ VP9Worker *const worker = frame_worker_data->pbi->frame_worker_owner;
+ BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
+ // Signal all the other threads that are waiting for this frame.
+ vp9_frameworker_lock_stats(worker);
+ frame_worker_data->frame_context_ready = 1;
+ lock_buffer_pool(pool);
+ frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
+ unlock_buffer_pool(pool);
+ frame_worker_data->pbi->need_resync = 1;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ return 0;
+ }
+ } else if (frame_worker_data->result != 0) {
+ // Check decode result in serial decode.
+ frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
+ frame_worker_data->pbi->need_resync = 1;
+ }
+ return !frame_worker_data->result;
+}
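For readers unfamiliar with the worker abstraction used above, here is a minimal sketch of the hook contract assumed by frame_worker_hook() (illustrative, not part of the patch): the interface runs hook(data1, data2) on the worker thread, and a zero return value is surfaced through sync().

#include "vp9/common/vp9_thread.h"

// Illustrative sketch of the VP9Worker hook contract: return nonzero on
// success, zero on failure.
static int my_hook(void *data1, void *data2) {
  int *result = (int *)data1;
  (void)data2;
  *result = 42;  // pretend to do some work
  return 1;      // nonzero = success
}

static int run_one_job(void) {
  const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
  VP9Worker worker;
  int result = 0;
  winterface->init(&worker);
  worker.hook = my_hook;
  worker.data1 = &result;
  if (!winterface->reset(&worker)) return -1;  // spawns the thread
  winterface->launch(&worker);                 // runs my_hook() asynchronously
  if (!winterface->sync(&worker)) return -1;   // 0 means the hook failed
  winterface->end(&worker);
  return result;
}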
+
+static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
+ int i;
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+
+ ctx->last_show_frame = -1;
+ ctx->next_submit_worker_id = 0;
+ ctx->last_submit_worker_id = 0;
+ ctx->next_output_worker_id = 0;
+ ctx->frame_cache_read = 0;
+ ctx->frame_cache_write = 0;
+ ctx->num_cache_frames = 0;
+ ctx->num_frame_workers =
+ (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
+ ctx->available_threads = ctx->num_frame_workers;
+ ctx->flushed = 0;
+
+ ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+ if (ctx->buffer_pool == NULL)
+ return VPX_CODEC_MEM_ERROR;
+
+#if CONFIG_MULTITHREAD
+ if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate buffer pool mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
+#endif
+
+ ctx->frame_workers = (VP9Worker *)
+ vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+ if (ctx->frame_workers == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_workers");
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *frame_worker_data = NULL;
+ winterface->init(worker);
+ worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
+ if (worker->data1 == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ frame_worker_data = (FrameWorkerData *)worker->data1;
+ frame_worker_data->pbi = vp9_decoder_create(ctx->buffer_pool);
+ if (frame_worker_data->pbi == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ frame_worker_data->pbi->frame_worker_owner = worker;
+ frame_worker_data->worker_id = i;
+ frame_worker_data->scratch_buffer = NULL;
+ frame_worker_data->scratch_buffer_size = 0;
+ frame_worker_data->frame_context_ready = 0;
+#if CONFIG_MULTITHREAD
+ if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
- ctx->pbi->max_threads = ctx->cfg.threads;
- ctx->pbi->inv_tile_order = ctx->invert_tile_order;
- ctx->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data cond");
+ return VPX_CODEC_MEM_ERROR;
+ }
+#endif
+    // If decoding in serial mode, the FrameWorker thread may create tile
+    // worker threads or a loopfilter thread.
+ frame_worker_data->pbi->max_threads =
+ (ctx->frame_parallel_decode == 0) ? ctx->cfg.threads : 0;
+
+ frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
+ frame_worker_data->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ frame_worker_data->pbi->common.frame_parallel_decode =
+ ctx->frame_parallel_decode;
+ worker->hook = (VP9WorkerHook)frame_worker_hook;
+ if (!winterface->reset(worker)) {
+ set_error_detail(ctx, "Frame Worker thread creation failed");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ }
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
set_default_ppflags(&ctx->postproc_cfg);
init_buffer_callbacks(ctx);
+
+ return VPX_CODEC_OK;
}
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
- YV12_BUFFER_CONFIG sd;
vp9_ppflags_t flags = {0, 0, 0};
- VP9_COMMON *cm = NULL;
-
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
(void)deadline;
- vp9_zero(sd);
- ctx->img_avail = 0;
-
// Determine the stream parameters. Note that we rely on peek_si to
// validate that we have a buffer that does not wrap around the top
// of the heap.
return VPX_CODEC_ERROR;
}
- // Initialize the decoder instance on the first frame
- if (ctx->pbi == NULL) {
- init_decoder(ctx);
- if (ctx->pbi == NULL)
- return VPX_CODEC_ERROR;
- }
+ if (!ctx->frame_parallel_decode) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ frame_worker_data->data = *data;
+ frame_worker_data->data_size = data_sz;
+ frame_worker_data->user_priv = user_priv;
- // Set these even if already initialized. The caller may have changed the
- // decrypt config between frames.
- ctx->pbi->decrypt_cb = ctx->decrypt_cb;
- ctx->pbi->decrypt_state = ctx->decrypt_state;
+ // Set these even if already initialized. The caller may have changed the
+ // decrypt config between frames.
+ frame_worker_data->pbi->decrypt_cb = ctx->decrypt_cb;
+ frame_worker_data->pbi->decrypt_state = ctx->decrypt_state;
- cm = &ctx->pbi->common;
+ worker->had_error = 0;
+ winterface->execute(worker);
- if (vp9_receive_compressed_data(ctx->pbi, data_sz, data))
- return update_error_state(ctx, &cm->error);
+ // Update data pointer after decode.
+ *data = frame_worker_data->data_end;
+
+ if (worker->had_error)
+ return update_error_state(ctx, &frame_worker_data->pbi->common.error);
+ } else {
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ VP9Worker *const worker = &ctx->frame_workers[ctx->next_submit_worker_id];
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ // Copy context from last worker thread to next worker thread.
+ if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
+ vp9_frameworker_copy_context(
+ &ctx->frame_workers[ctx->next_submit_worker_id],
+ &ctx->frame_workers[ctx->last_submit_worker_id]);
+
+ frame_worker_data->pbi->ready_for_new_data = 0;
+ // Copy the compressed data into worker's internal buffer.
+      // TODO(hkuang): Would it be better to have all workers allocate a buffer
+      // the size of the first intra frame? This would avoid repeated
+      // allocations and deallocations.
+ if (frame_worker_data->scratch_buffer_size < data_sz) {
+ frame_worker_data->scratch_buffer =
+ (uint8_t *)vpx_realloc(frame_worker_data->scratch_buffer, data_sz);
+ if (frame_worker_data->scratch_buffer == NULL) {
+ set_error_detail(ctx, "Failed to reallocate scratch buffer");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ frame_worker_data->scratch_buffer_size = data_sz;
+ }
+ frame_worker_data->data_size = data_sz;
+ vpx_memcpy(frame_worker_data->scratch_buffer, *data, data_sz);
+
+ frame_worker_data->frame_decoded = 0;
+ frame_worker_data->frame_context_ready = 0;
+ frame_worker_data->data = frame_worker_data->scratch_buffer;
+ frame_worker_data->user_priv = user_priv;
+
+ if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
+ ctx->last_submit_worker_id =
+ (ctx->last_submit_worker_id + 1) % ctx->num_frame_workers;
+
+ ctx->next_submit_worker_id =
+ (ctx->next_submit_worker_id + 1) % ctx->num_frame_workers;
+ --ctx->available_threads;
+ worker->had_error = 0;
+ winterface->launch(worker);
+ }
if (ctx->base.init_flags & VPX_CODEC_USE_POSTPROC)
set_ppflags(ctx, &flags);
- if (vp9_get_raw_frame(ctx->pbi, &sd, &flags))
- return update_error_state(ctx, &cm->error);
-
- yuvconfig2image(&ctx->img, &sd, user_priv);
- ctx->img.fb_priv = cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
- ctx->img_avail = 1;
-
return VPX_CODEC_OK;
}
+static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0, 0, 0};
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ ctx->next_output_worker_id =
+ (ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
+ winterface->sync(worker);
+ ++ctx->available_threads;
+ if (vp9_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) {
+ VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
+ yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
+ frame_worker_data->user_priv);
+ ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
+ frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
+ ctx->frame_cache_write =
+ (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
+ ++ctx->num_cache_frames;
+ }
+}
+
static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
// Reset flushed when receiving a valid frame.
ctx->flushed = 0;
+ // Initialize the decoder workers on the first frame.
+ if (ctx->frame_workers == NULL) {
+ const vpx_codec_err_t res = init_decoder(ctx);
+ if (res != VPX_CODEC_OK)
+ return res;
+ }
+
res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK)
for (i = 0; i < frame_count; ++i) {
const uint8_t *data_start_copy = data_start;
const uint32_t frame_size = frame_sizes[i];
- vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
- ctx->base.err_detail = "Invalid frame size in index";
+ set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
+ if (ctx->available_threads == 0) {
+        // No more threads for decoding. Wait until the next output worker
+        // finishes decoding, then copy the decoded frame into the cache.
+ if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
+ wait_worker_and_cache_frame(ctx);
+ } else {
+ // TODO(hkuang): Add unit test to test this path.
+ set_error_detail(ctx, "Frame output cache is full.");
+ return VPX_CODEC_ERROR;
+ }
+ }
+
res = decode_one(ctx, &data_start_copy, frame_size, user_priv,
deadline);
if (res != VPX_CODEC_OK)
return res;
-
data_start += frame_size;
}
} else {
- res = decode_one(ctx, &data_start, data_sz, user_priv, deadline);
+ if (ctx->available_threads == 0) {
+        // No more threads for decoding. Wait until the next output worker
+        // finishes decoding, then copy the decoded frame into the cache.
+ if (ctx->num_cache_frames < FRAME_CACHE_SIZE) {
+ wait_worker_and_cache_frame(ctx);
+ } else {
+ // TODO(hkuang): Add unit test to test this path.
+ set_error_detail(ctx, "Frame output cache is full.");
+ return VPX_CODEC_ERROR;
+ }
+ }
+
+ res = decode_one(ctx, &data, data_sz, user_priv, deadline);
if (res != VPX_CODEC_OK)
return res;
-
- // Extra data detected after the frame.
- if (data_start < data_end - 1) {
- ctx->base.err_detail = "Fail to decode frame in parallel mode";
- return VPX_CODEC_INCAPABLE;
- }
}
} else {
// Decode in serial mode.
vpx_codec_err_t res;
if (data_start < data
|| frame_size > (uint32_t) (data_end - data_start)) {
- ctx->base.err_detail = "Invalid frame size in index";
+ set_error_detail(ctx, "Invalid frame size in index");
return VPX_CODEC_CORRUPT_FRAME;
}
}
}
- return VPX_CODEC_OK;
+ return res;
+}
+
+static void release_last_output_frame(vpx_codec_alg_priv_t *ctx) {
+ RefCntBuffer *const frame_bufs = ctx->buffer_pool->frame_bufs;
+ // Decrease reference count of last output frame in frame parallel mode.
+ if (ctx->frame_parallel_decode && ctx->last_show_frame >= 0) {
+ BufferPool *const pool = ctx->buffer_pool;
+ lock_buffer_pool(pool);
+ decrease_ref_count(ctx->last_show_frame, frame_bufs, pool);
+ unlock_buffer_pool(pool);
+ }
}
static vpx_image_t *decoder_get_frame(vpx_codec_alg_priv_t *ctx,
vpx_codec_iter_t *iter) {
vpx_image_t *img = NULL;
- if (ctx->img_avail) {
- // iter acts as a flip flop, so an image is only returned on the first
- // call to get_frame.
- if (!(*iter)) {
- img = &ctx->img;
- *iter = img;
- }
+  // Only return a frame when all the CPUs are busy or the application has
+  // flushed the decoder in frame-parallel decode.
+ if (ctx->frame_parallel_decode && ctx->available_threads > 0 &&
+ !ctx->flushed) {
+ return img;
+ }
+
+ // Output the frames in the cache first.
+ if (ctx->num_cache_frames > 0) {
+ release_last_output_frame(ctx);
+ ctx->last_show_frame = ctx->frame_cache[ctx->frame_cache_read].fb_idx;
+ img = &ctx->frame_cache[ctx->frame_cache_read].img;
+ ctx->frame_cache_read = (ctx->frame_cache_read + 1) % FRAME_CACHE_SIZE;
+ --ctx->num_cache_frames;
+ return img;
}
- ctx->img_avail = 0;
+ // iter acts as a flip flop, so an image is only returned on the first
+ // call to get_frame.
+ if (*iter == NULL && ctx->frame_workers != NULL) {
+ do {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0, 0, 0};
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ VP9Worker *const worker =
+ &ctx->frame_workers[ctx->next_output_worker_id];
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ ctx->next_output_worker_id =
+ (ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
+ // Wait for the frame from worker thread.
+ if (!winterface->sync(worker)) {
+ // Decoding failed. Release the worker thread.
+ ++ctx->available_threads;
+ if (ctx->flushed != 1)
+ return img;
+ } else if (vp9_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) {
+ VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ ++ctx->available_threads;
+ release_last_output_frame(ctx);
+ ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
+ yuvconfig2image(&ctx->img, &sd, frame_worker_data->user_priv);
+ ctx->img.fb_priv = frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
+ img = &ctx->img;
+ return img;
+ }
+ } while (ctx->next_output_worker_id != ctx->next_submit_worker_id);
+ }
return img;
}
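From the application's point of view, the reworked decoder_get_frame() keeps the usual pull interface. A minimal consumption loop (sketch, not part of the patch):

#include "vpx/vpx_decoder.h"

// Illustrative sketch: drain all frames that are ready after a decode call.
// In frame-parallel mode nothing may be returned until all workers are busy
// or the decoder has been flushed (a decode call with NULL data).
static void drain_frames(vpx_codec_ctx_t *decoder) {
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;
  while ((img = vpx_codec_get_frame(decoder, &iter)) != NULL) {
    // consume img->planes[] / img->stride[] here
  }
}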
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) {
if (cb_get == NULL || cb_release == NULL) {
return VPX_CODEC_INVALID_PARAM;
- } else if (ctx->pbi == NULL) {
+ } else if (ctx->frame_workers == NULL) {
// If the decoder has already been initialized, do not accept changes to
// the frame buffer functions.
ctx->get_ext_fb_cb = cb_get;
va_list args) {
vpx_ref_frame_t *const data = va_arg(args, vpx_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
vpx_ref_frame_t *const frame = (vpx_ref_frame_t *)data;
YV12_BUFFER_CONFIG sd;
-
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
- return vp9_set_reference_dec(&ctx->pbi->common,
+ return vp9_set_reference_dec(&frame_worker_data->pbi->common,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
va_list args) {
vpx_ref_frame_t *data = va_arg(args, vpx_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
- vpx_ref_frame_t *frame = (vpx_ref_frame_t *)data;
+ vpx_ref_frame_t *frame = (vpx_ref_frame_t *) data;
YV12_BUFFER_CONFIG sd;
-
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
image2yuvconfig(&frame->img, &sd);
-
- return vp9_copy_reference_dec(ctx->pbi,
+ return vp9_copy_reference_dec(frame_worker_data->pbi,
(VP9_REFFRAME)frame->frame_type, &sd);
} else {
return VPX_CODEC_INVALID_PARAM;
va_list args) {
vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
- YV12_BUFFER_CONFIG* fb = get_ref_frame(&ctx->pbi->common, data->idx);
+ YV12_BUFFER_CONFIG* fb;
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
if (fb == NULL) return VPX_CODEC_ERROR;
-
yuvconfig2image(&data->img, fb, NULL);
return VPX_CODEC_OK;
} else {
va_list args) {
int *const update_info = va_arg(args, int *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (update_info) {
- if (ctx->pbi)
- *update_info = ctx->pbi->refresh_frame_flags;
- else
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ *update_info = frame_worker_data->pbi->refresh_frame_flags;
+ } else {
return VPX_CODEC_ERROR;
+ }
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
-
static vpx_codec_err_t ctrl_get_frame_corrupted(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *corrupted = va_arg(args, int *);
- if (corrupted != NULL && ctx->pbi != NULL) {
- const YV12_BUFFER_CONFIG *const frame = ctx->pbi->common.frame_to_show;
- if (frame == NULL) return VPX_CODEC_ERROR;
- *corrupted = frame->corrupted;
+ if (corrupted) {
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ RefCntBuffer *const frame_bufs =
+ frame_worker_data->pbi->common.buffer_pool->frame_bufs;
+ if (frame_worker_data->pbi->common.frame_to_show == NULL)
+ return VPX_CODEC_ERROR;
+ *corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted;
+ } else {
+ return VPX_CODEC_ERROR;
+ }
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
va_list args) {
int *const display_size = va_arg(args, int *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (display_size) {
- if (ctx->pbi) {
- const VP9_COMMON *const cm = &ctx->pbi->common;
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ const VP9_COMMON *const cm = &frame_worker_data->pbi->common;
display_size[0] = cm->display_width;
display_size[1] = cm->display_height;
} else {
static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx,
va_list args) {
unsigned int *const bit_depth = va_arg(args, unsigned int *);
+ VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
if (bit_depth) {
- if (ctx->pbi) {
- const VP9_COMMON *const cm = &ctx->pbi->common;
+ if (worker) {
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ const VP9_COMMON *const cm = &frame_worker_data->pbi->common;
*bit_depth = cm->bit_depth;
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
ctx->byte_alignment = byte_alignment;
- if (ctx->pbi != NULL) {
- VP9_COMMON *const cm = &ctx->pbi->common;
- cm->byte_alignment = byte_alignment;
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ frame_worker_data->pbi->common.byte_alignment = byte_alignment;
}
return VPX_CODEC_OK;
}
bps = 12;
}
}
+ img->cs = yv12->color_space;
img->bit_depth = 8;
img->w = yv12->y_stride;
img->h = ALIGN_POWER_OF_TWO(yv12->y_height + 2 * VP9_ENC_BORDER_IN_PIXELS, 3);
yv12->y_stride = img->stride[VPX_PLANE_Y];
yv12->uv_stride = img->stride[VPX_PLANE_U];
+ yv12->color_space = img->cs;
#if CONFIG_VP9_HIGHBITDEPTH
if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_error_intrin_avx2.c
VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_avx2.c
+VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_avg_neon.c
VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_sad_neon.c
+VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_sad4d_neon.c
VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_dct_neon.c
VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_variance_neon.c
VP9_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/vp9_quantize_neon.c
VP9_DX_SRCS-yes += decoder/vp9_decodeframe.c
VP9_DX_SRCS-yes += decoder/vp9_decodeframe.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.c
-VP9_DX_SRCS-yes += decoder/vp9_dthread.c
-VP9_DX_SRCS-yes += decoder/vp9_dthread.h
VP9_DX_SRCS-yes += decoder/vp9_reader.h
VP9_DX_SRCS-yes += decoder/vp9_reader.c
VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.c
VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
+VP9_DX_SRCS-yes += decoder/vp9_dthread.c
+VP9_DX_SRCS-yes += decoder/vp9_dthread.h
VP9_DX_SRCS-yes += decoder/vp9_decoder.c
VP9_DX_SRCS-yes += decoder/vp9_decoder.h
VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c
#ifndef VPX_VP8CX_H_
#define VPX_VP8CX_H_
-/*!\defgroup vp8_encoder WebM VP8 Encoder
+/*!\defgroup vp8_encoder WebM VP8/VP9 Encoder
* \ingroup vp8
*
* @{
#include "./vp8.h"
/*!\file
- * \brief Provides definitions for using the VP8 encoder algorithm within the
+ * \brief Provides definitions for using the VP8 or VP9 encoder algorithm within the
* vpx Codec Interface.
*/
/*!\name Algorithm interface for VP8
*
- * This interface provides the capability to encode raw VP8 streams, as would
- * be found in AVI files.
+ * This interface provides the capability to encode raw VP8 streams.
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_cx_algo;
extern vpx_codec_iface_t *vpx_codec_vp8_cx(void);
+/*!@} - end algorithm interface member group*/
-/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+/*!\name Algorithm interface for VP9
+ *
+ * This interface provides the capability to encode raw VP9 streams.
+ * @{
+ */
extern vpx_codec_iface_t vpx_codec_vp9_cx_algo;
extern vpx_codec_iface_t *vpx_codec_vp9_cx(void);
-
/*!@} - end algorithm interface member group*/
VP8E_SET_SCREEN_CONTENT_MODE, /**<control function to set encoder screen content mode */
- /* TODO(jkoleszar): Move to vp9cx.h */
+ /*!\brief Codec control function to set lossless encoding mode
+ *
+   * VP9 can operate in lossless encoding mode, in which the bitstream
+   * produced can be decoded to reconstruct a perfect copy of the input
+   * source. This control function provides a means to switch the encoder
+   * into lossless coding mode (1) or normal, possibly lossy, coding mode (0).
+   * 0 = lossy coding mode
+   * 1 = lossless coding mode
+   *
+   * By default, the encoder operates in normal (lossy) coding mode.
+ */
VP9E_SET_LOSSLESS,
+
+ /*!\brief Codec control function to set number of tile columns
+ *
+   * In encoding and decoding, VP9 allows an input image frame to be
+   * partitioned into separate vertical tile columns, which can be encoded or
+   * decoded independently. This enables easy implementation of parallel
+   * encoding and decoding. This control requests the encoder to use column
+   * tiles in encoding an input frame, with the number of tile columns
+   * (in log2 units) as the parameter:
+   * 0 = 1 tile column
+   * 1 = 2 tile columns
+   * 2 = 4 tile columns
+   * .....
+   * n = 2**n tile columns
+   * The requested number of tile columns will be capped by the encoder based
+   * on image size limitations (the minimum width of a tile column is 256
+   * pixels, the maximum is 4096).
+   *
+   * By default, the value is 0, i.e. a single tile column for the entire
+   * image.
+ */
VP9E_SET_TILE_COLUMNS,
+
+ /*!\brief Codec control function to set number of tile rows
+ *
+   * In encoding and decoding, VP9 allows an input image frame to be partitioned
+   * into separate horizontal tile rows. Tile rows are encoded or decoded
+ * sequentially. Even though encoding/decoding of later tile rows depends on
+ * earlier ones, this allows the encoder to output data packets for tile rows
+ * prior to completely processing all tile rows in a frame, thereby reducing
+ * the latency in processing between input and output. The parameter
+ * for this control describes the number of tile rows, which has a valid
+ * range [0, 2]:
+ * 0 = 1 tile row
+ * 1 = 2 tile rows
+ * 2 = 4 tile rows
+ *
+   * By default, the value is 0, i.e. a single tile row for the entire image.
+ */
VP9E_SET_TILE_ROWS,
+
+ /*!\brief Codec control function to enable frame parallel decoding feature
+ *
+ * VP9 has a bitstream feature to reduce decoding dependency between frames
+ * by turning off backward update of probability context used in encoding
+   * and decoding. This allows staged parallel processing of more than one
+   * video frame in the decoder. This control function provides a means to
+   * turn this feature on or off for bitstreams produced by the encoder.
+ *
+ * By default, this feature is off.
+ */
VP9E_SET_FRAME_PARALLEL_DECODING,
+
+ /*!\brief Codec control function to set adaptive quantization mode
+ *
+   * VP9 has a segment-based feature that allows the encoder to adaptively
+   * change the quantization parameter for each segment within a frame to
+   * improve the subjective quality. This control selects which of the
+   * supported AQ modes the encoder operates in.
+   *
+   * By default, the encoder operates with AQ mode 0 (adaptive quantization
+   * off).
+ */
VP9E_SET_AQ_MODE,
+
+ /*!\brief Codec control function to enable/disable periodic Q boost
+ *
+ * One VP9 encoder speed feature is to enable quality boost by lowering
+   * frame-level Q periodically. This control function provides a means to
+   * turn this feature on or off.
+ * 0 = off
+ * 1 = on
+ *
+ * By default, the encoder is allowed to use this feature for appropriate
+ * encoding modes.
+ */
VP9E_SET_FRAME_PERIODIC_BOOST,
+
/*!\brief control function to set noise sensitivity
*
* 0: off, 1: OnYOnly
*/
VP9E_SET_NOISE_SENSITIVITY,
+  /*!\brief control function to turn SVC on/off in the encoder.
+ * \note Return value is VPX_CODEC_INVALID_PARAM if the encoder does not
+ * support SVC in its current encoding mode
+ * 0: off, 1: on
+ */
VP9E_SET_SVC,
+
+ /*!\brief control function to set parameters for SVC.
+ * \note Parameters contain min_q, max_q, scaling factor for each of the
+ * SVC layers.
+ */
VP9E_SET_SVC_PARAMETERS,
/*!\brief control function to set svc layer for spatial and temporal.
* temporal layer.
*/
VP9E_SET_SVC_LAYER_ID,
+
+ /*!\brief control function to set content type.
+ * \note Valid parameter range:
+ * VP9E_CONTENT_DEFAULT = Regular video content (Default)
+ * VP9E_CONTENT_SCREEN = Screen capture content
+ */
VP9E_SET_TUNE_CONTENT,
+
+ /*!\brief control function to get svc layer ID.
+ * \note The layer ID returned is for the data packet from the registered
+ * callback function.
+ */
VP9E_GET_SVC_LAYER_ID,
+
+ /*!\brief control function to register callback for getting per layer packet.
+ * \note Parameter for this control function is a structure with a callback
+ * function and a pointer to private data used by the callback.
+ */
VP9E_REGISTER_CX_CALLBACK,
+
+ /*!\brief control function to set color space info.
+ * \note Valid ranges: 0..7, default is "UNKNOWN".
+ * 0 = UNKNOWN,
+ * 1 = BT_601
+ * 2 = BT_709
+ * 3 = SMPTE_170
+ * 4 = SMPTE_240
+ * 5 = BT_2020
+ * 6 = RESERVED
+ * 7 = SRGB
+ */
+ VP9E_SET_COLOR_SPACE,
};
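As a usage illustration for the controls documented above, a sketch (not part of the patch) assuming an already-initialized VP9 encoder context:

#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

// Illustrative sketch: typical VP9 encoder control calls.
static void apply_vp9_controls(vpx_codec_ctx_t *codec) {
  vpx_codec_control(codec, VP9E_SET_TILE_COLUMNS, 2);             // 4 tile columns
  vpx_codec_control(codec, VP9E_SET_FRAME_PARALLEL_DECODING, 1);  // fp bitstream
  vpx_codec_control(codec, VP9E_SET_AQ_MODE, 0);                  // AQ off
  vpx_codec_control(codec, VP9E_SET_LOSSLESS, 0);                 // lossy mode
}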
/*!\brief vpx 1-D scaling mode
VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int)
VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vp9e_tune_content */
+
+VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int)
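The new control is used the same way as the others; a one-line sketch (assumes the `codec` pointer from the previous example):

  // Illustrative sketch: tag the bitstream as BT.709 before encoding.
  vpx_codec_control(codec, VP9E_SET_COLOR_SPACE, VPX_CS_BT_709);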
/*! @} - end defgroup vp8_encoder */
#ifdef __cplusplus
} // extern "C"
*/
-/*!\defgroup vp8_decoder WebM VP8 Decoder
+/*!\defgroup vp8_decoder WebM VP8/VP9 Decoder
* \ingroup vp8
*
* @{
*/
/*!\file
- * \brief Provides definitions for using the VP8 algorithm within the vpx Decoder
+ * \brief Provides definitions for using VP8 or VP9 within the vpx Decoder
* interface.
*/
#ifndef VPX_VP8DX_H_
/*!\name Algorithm interface for VP8
*
- * This interface provides the capability to decode raw VP8 streams, as would
- * be found in AVI files and other non-Flash uses.
+ * This interface provides the capability to decode VP8 streams.
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_dx_algo;
extern vpx_codec_iface_t *vpx_codec_vp8_dx(void);
+/*!@} - end algorithm interface member group*/
-/* TODO(jkoleszar): These move to VP9 in a later patch set. */
+/*!\name Algorithm interface for VP9
+ *
+ * This interface provides the capability to decode VP9 streams.
+ * @{
+ */
extern vpx_codec_iface_t vpx_codec_vp9_dx_algo;
extern vpx_codec_iface_t *vpx_codec_vp9_dx(void);
/*!@} - end algorithm interface member group*/
*/
VP9_SET_BYTE_ALIGNMENT,
- /** For testing. */
+  /** control function to invert the decoding order so tiles are decoded from
+   * right to left. The function is used in a test to confirm the decoding
+   * independence of tile columns. The function may also be used in an
+   * application where this decoding order is desired.
+ *
+ * TODO(yaowu): Rework the unit test that uses this control, and in a future
+ * release, this test-only control shall be removed.
+ */
VP9_INVERT_TILE_DECODE_ORDER,
VP8_DECODER_CTRL_ID_MAX
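A corresponding decoder-side control call might look like this (sketch only; `decoder` is assumed to be an initialized VP9 decoder context):

  // Illustrative sketch: decode tiles right-to-left to verify tile
  // independence.
  vpx_codec_control(&decoder, VP9_INVERT_TILE_DECODE_ORDER, 1);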
/*!\brief Callback function pointer / user data pair storage */
typedef struct vpx_codec_enc_output_cx_cb_pair {
- vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt;
- void *user_priv;
+ vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */
+ void *user_priv; /**< Pointer to private data */
} vpx_codec_priv_output_cx_pkt_cb_pair_t;
/*!\brief Rational Number
*
*/
typedef struct vpx_svc_parameters {
- int max_quantizers[VPX_SS_MAX_LAYERS];
- int min_quantizers[VPX_SS_MAX_LAYERS];
- int scaling_factor_num[VPX_SS_MAX_LAYERS];
- int scaling_factor_den[VPX_SS_MAX_LAYERS];
+ int max_quantizers[VPX_SS_MAX_LAYERS]; /**< Max Q for each layer */
+ int min_quantizers[VPX_SS_MAX_LAYERS]; /**< Min Q for each layer */
+ int scaling_factor_num[VPX_SS_MAX_LAYERS]; /**< Scaling factor-numerator*/
+ int scaling_factor_den[VPX_SS_MAX_LAYERS]; /**< Scaling factor-denominator*/
} vpx_svc_extra_cfg_t;
#include "./vpx_integer.h"
/*!\brief The maximum number of work buffers used by libvpx.
+ *  Supports a maximum of 4 threads decoding video in parallel.
+ *  Each thread will use one work buffer.
+ * TODO(hkuang): Add support to set the number of worker threads dynamically.
*/
-#define VPX_MAXIMUM_WORK_BUFFERS 1
+#define VPX_MAXIMUM_WORK_BUFFERS 8
/*!\brief The maximum number of reference buffers that a VP9 encoder may use.
*/
* types, removing or reassigning enums, adding/removing/rearranging
* fields to structures
*/
-#define VPX_IMAGE_ABI_VERSION (2) /**<\hideinitializer*/
+#define VPX_IMAGE_ABI_VERSION (3) /**<\hideinitializer*/
#define VPX_IMG_FMT_PLANAR 0x100 /**< Image is a planar format. */
VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
+ /*!\brief List of supported color spaces */
+ typedef enum vpx_color_space {
+ VPX_CS_UNKNOWN = 0, /**< Unknown */
+ VPX_CS_BT_601 = 1, /**< BT.601 */
+ VPX_CS_BT_709 = 2, /**< BT.709 */
+ VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */
+ VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */
+ VPX_CS_BT_2020 = 5, /**< BT.2020 */
+ VPX_CS_RESERVED = 6, /**< Reserved */
+ VPX_CS_SRGB = 7 /**< sRGB */
+ } vpx_color_space_t; /**< alias for enum vpx_color_space */
+
/**\brief Image Descriptor */
typedef struct vpx_image {
vpx_img_fmt_t fmt; /**< Image Format */
+ vpx_color_space_t cs; /**< Color Space */
/* Image storage dimensions */
unsigned int w; /**< Stored image width */
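With the new `cs` field, an application can inspect the color space the decoder recovered from the bitstream. A small sketch (not part of the patch; `img` is a frame returned by vpx_codec_get_frame()):

  // Illustrative sketch: check the color space reported on a decoded frame.
  if (img->cs == VPX_CS_BT_709) {
    // use BT.709 coefficients for any YUV-to-RGB conversion
  }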
#define VPX_PORTS_X86_H_
#include <stdlib.h>
#include "vpx_config.h"
+#include "vpx/vpx_integer.h"
#ifdef __cplusplus
extern "C" {
#endif
#endif /* end others */
+// NaCl has no support for xgetbv or the raw opcode.
+#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
+static INLINE uint64_t xgetbv(void) {
+ const uint32_t ecx = 0;
+ uint32_t eax, edx;
+ // Use the raw opcode for xgetbv for compatibility with older toolchains.
+ __asm__ volatile (
+ ".byte 0x0f, 0x01, 0xd0\n"
+ : "=a"(eax), "=d"(edx) : "c" (ecx));
+ return ((uint64_t)edx << 32) | eax;
+}
+#elif (defined(_M_X64) || defined(_M_IX86)) && \
+ defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219 // >= VS2010 SP1
+#include <immintrin.h>
+#define xgetbv() _xgetbv(0)
+#elif defined(_MSC_VER) && defined(_M_IX86)
+static INLINE uint64_t xgetbv(void) {
+ uint32_t eax_, edx_;
+ __asm {
+ xor ecx, ecx // ecx = 0
+ // Use the raw opcode for xgetbv for compatibility with older toolchains.
+ __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
+ mov eax_, eax
+ mov edx_, edx
+ }
+ return ((uint64_t)edx_ << 32) | eax_;
+}
+#else
+#define xgetbv() 0U // no AVX for older x64 or unrecognized toolchains.
+#endif
+
#define HAS_MMX 0x01
#define HAS_SSE 0x02
#define HAS_SSE2 0x04
if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
- if (reg_ecx & BIT(28)) flags |= HAS_AVX;
+ // bits 27 (OSXSAVE) & 28 (256-bit AVX)
+ if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
+ if ((xgetbv() & 0x6) == 0x6) {
+ flags |= HAS_AVX;
- /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
- reg_eax = 7;
- reg_ecx = 0;
- cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
+ cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
- if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
+ if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
+ }
+ }
return flags & mask;
}
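The practical effect of the stricter check is that HAS_AVX and HAS_AVX2 are reported only when the OS actually saves the YMM state (XCR0 bits 1 and 2). Callers keep using the same capability query; a sketch (not part of the patch):

#include "vpx_ports/x86.h"

// Illustrative sketch: pick a code path from the reported SIMD capabilities.
static void choose_simd_path(void) {
  const int caps = x86_simd_caps();
  if (caps & HAS_AVX2) {
    // AVX2 path: the xgetbv() check above guarantees OS support for the
    // YMM registers.
  } else if (caps & HAS_SSE2) {
    // baseline SSE2 path
  }
}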
int subsampling_x;
int subsampling_y;
unsigned int bit_depth;
+ vpx_color_space_t color_space;
int corrupted;
int flags;
if (vpx_codec_control(&decoder, VP8D_GET_FRAME_CORRUPTED, &corrupted)) {
warn("Failed VP8_GET_FRAME_CORRUPTED: %s", vpx_codec_error(&decoder));
- goto fail;
+ if (!keep_going)
+ goto fail;
}
frames_corrupted += corrupted;
NULL, "test-decode", 1, "Test encode/decode mismatch", test_decode_enum);
static const arg_def_t framerate = ARG_DEF(
NULL, "fps", 1, "Stream frame rate (rate/scale)");
+static const arg_def_t use_webm = ARG_DEF(
+ NULL, "webm", 0, "Output WebM (default when WebM IO is enabled)");
static const arg_def_t use_ivf = ARG_DEF(
- NULL, "ivf", 0, "Output IVF (default is WebM if WebM IO is enabled)");
+ NULL, "ivf", 0, "Output IVF");
static const arg_def_t out_part = ARG_DEF(
"P", "output-partitions", 0,
"Makes encoder output partitions. Requires IVF output!");
&debugmode,
&outputfile, &codecarg, &passes, &pass_arg, &fpf_name, &limit, &skip,
&deadline, &best_dl, &good_dl, &rt_dl,
- &quietarg, &verbosearg, &psnrarg, &use_ivf, &out_part, &q_hist_n,
+ &quietarg, &verbosearg, &psnrarg, &use_webm, &use_ivf, &out_part, &q_hist_n,
&rate_hist_n, &disable_warnings, &disable_warning_prompt,
NULL
};
VP8E_SET_MAX_INTER_BITRATE_PCT, VP8E_SET_GF_CBR_BOOST_PCT,
VP9E_SET_LOSSLESS, VP9E_SET_FRAME_PARALLEL_DECODING, VP9E_SET_AQ_MODE,
VP9E_SET_FRAME_PERIODIC_BOOST, VP9E_SET_NOISE_SENSITIVITY,
- VP9E_SET_TUNE_CONTENT,
+ VP9E_SET_TUNE_CONTENT, VP9E_SET_COLOR_SPACE,
0
};
#endif
} else if (arg_match(&arg, &fpmbf_name, argi)) {
config->fpmb_stats_fn = arg.val;
#endif
+ } else if (arg_match(&arg, &use_webm, argi)) {
+#if CONFIG_WEBM_IO
+ config->write_webm = 1;
+#else
+ die("Error: --webm specified but webm is disabled.");
+#endif
} else if (arg_match(&arg, &use_ivf, argi)) {
config->write_webm = 0;
} else if (arg_match(&arg, &threads, argi)) {
stats->file = fopen(fpf, "rb");
+ if (stats->file == NULL)
+ fatal("First-pass stats file does not exist!");
+
if (fseek(stats->file, 0, SEEK_END))
fatal("First-pass stats file must be seekable!");
webm_ctx->block_frame_index = 0;
webm_ctx->video_track_index = 0;
webm_ctx->timestamp_ns = 0;
+ webm_ctx->is_key_frame = false;
}
void get_first_cluster(struct WebmInputContext *const webm_ctx) {
}
*bytes_in_buffer = frame.len;
webm_ctx->timestamp_ns = block->GetTime(cluster);
+ webm_ctx->is_key_frame = block->IsKey();
mkvparser::MkvReader *const reader =
reinterpret_cast<mkvparser::MkvReader*>(webm_ctx->reader);
int block_frame_index;
int video_track_index;
uint64_t timestamp_ns;
+ int is_key_frame;
};
// Checks if the input is a WebM file. If so, initializes WebMInputContext so