--- /dev/null
+LIBVPX_TEST_SRCS-yes += test-data.mk
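+# Note (descriptive, not part of the original change): files appended to the
+# LIBVPX_TEST_DATA-* lists below are expected to be fetched by the `testdata`
+# make target and verified against the checksums in test-data.sha1; the
+# per-config suffixes simply gate which files a given build needs to download.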
+
+# Encoder test sources
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_collage_w352h288.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += hantro_odd.yuv
+
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_10_420.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_10_422.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_10_444.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_10_440.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_12_420.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_12_422.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_12_444.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_12_440.yuv
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_420.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_422.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_444.y4m
+LIBVPX_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_440.yuv
+
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_1280_720_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += rush_hour_444.y4m
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += screendata.y4m
+
+# Test vectors
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-001.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-001.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-002.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-002.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-003.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-003.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-004.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-004.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-005.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-005.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-006.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-006.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-007.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-007.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-008.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-008.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-009.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-009.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-010.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-010.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-011.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-011.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-012.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-012.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-013.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-013.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-014.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-014.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-015.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-015.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-016.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-016.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-017.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-017.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-018.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-00-comprehensive-018.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1400.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1400.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1411.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1411.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1416.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1416.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1417.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-01-intra-1417.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1402.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1402.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1412.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1412.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1418.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1418.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1424.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-02-inter-1424.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-01.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-01.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-02.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-02.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-03.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-03.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-04.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-04.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1401.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1401.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1403.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1403.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1407.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1407.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1408.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1408.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1409.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1409.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1410.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1410.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1413.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1413.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1414.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1414.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1415.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1415.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1425.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1425.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1426.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1426.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1427.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1427.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1432.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1432.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1435.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1435.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1436.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1436.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1437.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1437.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1441.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1441.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1442.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-03-segmentation-1442.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1404.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1404.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1405.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1405.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1406.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-04-partitions-1406.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1428.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1428.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1429.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1429.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1430.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1430.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1431.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1431.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1433.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1433.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1434.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1434.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1438.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1438.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1439.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1439.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1440.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1440.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1443.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-05-sharpness-1443.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-06-smallsize.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP8_DECODER) += vp80-06-smallsize.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-00.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-00.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-01.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-01.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-02.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-02.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-03.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-03.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-04.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-04.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-05.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-05.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-06.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-06.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-07.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-07.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-09.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-09.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-11.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-11.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-12.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-12.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-13.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-13.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-14.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-14.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-15.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-15.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-17.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-17.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-19.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-19.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-20.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-20.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-21.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-21.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-22.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-22.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-23.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-23.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-24.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-24.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-25.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-25.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-26.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-26.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-27.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-27.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-28.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-28.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-29.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-29.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-30.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-30.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-31.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-31.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-33.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-33.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-35.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-35.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-36.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-36.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-37.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-37.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-38.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-38.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-39.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-39.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-40.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-40.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-41.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-41.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-42.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-42.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-43.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-43.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-44.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-44.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-45.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-45.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-46.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-46.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-47.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-47.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-48.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-48.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-49.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-49.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-50.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-50.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-51.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-51.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-52.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-52.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-53.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-53.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-54.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-54.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-55.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-55.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-56.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-56.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-57.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-57.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-58.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-58.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-59.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-59.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-60.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-60.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-61.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-61.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-62.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-62.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-63.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-00-quantizer-63.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-3.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-3.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-5.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-5.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-6.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-6.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-7.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-01-sharpness-7.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-08x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-10x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-16x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-18x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-32x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-34x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-64x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x08.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x08.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x10.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x10.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x18.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x18.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x32.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x32.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x34.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x34.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x64.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x64.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x66.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-66x66.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-lf-1920x1080.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-02-size-lf-1920x1080.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-deltaq.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-deltaq.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-196x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-198x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-200x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-202x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-208x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-210x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-224x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x196.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x196.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x198.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x198.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x200.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x200.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x202.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x202.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x208.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x208.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x210.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x210.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x224.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x224.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x226.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-03-size-226x226.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-05-resize.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-05-resize.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-06-bilinear.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-06-bilinear.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-07-frame_parallel-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile-4x4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x2_frame_parallel.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x2_frame_parallel.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x4_frame_parallel.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x4_frame_parallel.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x8.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x8.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x8_frame_parallel.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-08-tile_1x8_frame_parallel.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-aq2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-aq2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-lf_deltas.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-lf_deltas.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-subpixel-00.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-09-subpixel-00.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-10-show-existing-frame.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-10-show-existing-frame.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-10-show-existing-frame2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-10-show-existing-frame2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-351x287.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-351x287.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-351x288.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-351x288.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-352x287.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-11-size-352x287.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_1.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_1.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_2.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_2.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_3.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-12-droppable_3.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-13-largescaling.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-13-largescaling.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-2-4-8-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-2-4-8-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-8.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-1-8.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-8-4-2-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-8.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-16-8.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-8.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-2-8.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-8.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-4-8.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-1.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-16.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-16.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-2.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-4.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-14-resize-fp-tiles-8-4.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey_adpq.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-15-segkey_adpq.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-16-intra-only.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-16-intra-only.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-17-show-existing-frame.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-17-show-existing-frame.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-18-resize.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-18-resize.ivf.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip-01.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip-01.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip-02.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-19-skip-02.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv422.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv422.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv440.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv440.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp91-2-04-yuv444.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-01.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-20-big_superframe-02.webm.md5
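+# The vectors in the block below are only needed when the decoder is built
+# with high bit depth support (configure --enable-vp9-highbitdepth), so they
+# are guarded by CONFIG_VP9_HIGHBITDEPTH.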
+ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-10bit-yuv420.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-12bit-yuv420.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp92-2-20-12bit-yuv420.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv422.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv422.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv422.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv422.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv440.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv440.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv440.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv440.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv444.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-10bit-yuv444.webm.md5
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv444.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp93-2-20-12bit-yuv444.webm.md5
+endif # CONFIG_VP9_HIGHBITDEPTH
+
+# Invalid files for testing libvpx error checking.
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-01-v2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-01-v2.webm.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-02-v2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-02-v2.webm.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-03-v3.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-03-v3.webm.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.v2.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-08-tile_1x4_frame_parallel_all_key.webm.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp91-2-mixedrefcsp-444to420.ivf.res
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-1.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-2.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += invalid-vp90-2-07-frame_parallel-3.webm
+
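+# The full-length BBB/Sintel/TOS streams below are relatively large; they are
+# only listed (and thus downloaded) for builds configured with
+# --enable-decode-perf-tests.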
+ifeq ($(CONFIG_DECODE_PERF_TESTS),yes)
+# Encode/decode test clip (already listed above when the VP9 encoder is enabled)
+ifneq ($(CONFIG_VP9_ENCODER),yes)
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += niklas_1280_720_30.yuv
+endif
+# BBB VP9 streams
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_426x240_tile_1x1_180kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_640x360_tile_1x2_337kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_854x480_tile_1x2_651kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1280x720_tile_1x4_1310kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x1_2581kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_2586kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-bbb_1920x1080_tile_1x4_fpm_2304kbps.webm
+# Sintel VP9 streams
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_426x182_tile_1x1_171kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_640x272_tile_1x2_318kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_854x364_tile_1x2_621kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1280x546_tile_1x4_1257kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-sintel_1920x818_tile_1x4_fpm_2279kbps.webm
+# TOS VP9 streams
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_426x178_tile_1x1_181kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_640x266_tile_1x2_336kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_656kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_854x356_tile_1x2_fpm_546kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_1306kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1280x534_tile_1x4_fpm_952kbps.webm
+LIBVPX_TEST_DATA-$(CONFIG_VP9_DECODER) += vp90-2-tos_1920x800_tile_1x4_fpm_2335kbps.webm
+endif # CONFIG_DECODE_PERF_TESTS
+
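+# Raw YUV clips used only by the encoder performance tests
+# (configure --enable-encode-perf-tests).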
+ifeq ($(CONFIG_ENCODE_PERF_TESTS),yes)
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += desktop_640_360_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += kirland_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcomoving_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += macmarcostationary_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += niklas_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomanarrows_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += tacomasmallcameramovement_640_480_30.yuv
+LIBVPX_TEST_DATA-$(CONFIG_VP9_ENCODER) += thaloundeskmtg_640_480_30.yuv
+endif # CONFIG_ENCODE_PERF_TESTS
e3ab35d4316c5e81325c50f5236ceca4bc0d35df vp90-2-15-segkey.webm.md5
9b7ca2cac09d34c4a5d296c1900f93b1e2f69d0d vp90-2-15-segkey_adpq.webm
8f46ba5f785d0c2170591a153e0d0d146a7c8090 vp90-2-15-segkey_adpq.webm.md5
+698a6910a97486b833073ef0c0b18d75dce57ee8 vp90-2-16-intra-only.webm
+5661b0168752969f055eec37b05fa9fa947dc7eb vp90-2-16-intra-only.webm.md5
+c01bb7938f9a9f25e0c37afdec2f2fb73b6cc7fa vp90-2-17-show-existing-frame.webm
+cc75f351818b9a619818f5cc77b9bc013d0c1e11 vp90-2-17-show-existing-frame.webm.md5
+013708bd043f0821a3e56fb8404d82e7a0c7af6c vp91-2-04-yuv422.webm
+1e58a7d23adad830a672f1733c9d2ae17890d59c vp91-2-04-yuv422.webm.md5
+25d78f28948789d159a9453ebc13048b818251b1 vp91-2-04-yuv440.webm
+81b3870b27a7f695ef6a43e87ab04bbdb5aee2f5 vp91-2-04-yuv440.webm.md5
0321d507ce62dedc8a51b4e9011f7a19aed9c3dc vp91-2-04-yuv444.webm
367e423dd41fdb49aa028574a2cfec5c2f325c5c vp91-2-04-yuv444.webm.md5
-76024eb753cdac6a5e5703aaea189d35c3c30ac7 invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.ivf
-d3964f9dad9f60363c81b688324d95b4ec7c8038 invalid-vp90-2-00-quantizer-00.webm.ivf.s5861_r01-05_b6-.ivf.res
-83f50908c8dc0ef8760595447a2ff7727489542e invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-.ivf.res
-c123d1f9f02fb4143abb5e271916e3a3080de8f6 invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf
-456d1493e52d32a5c30edf44a27debc1fa6b253a invalid-vp90-2-00-quantizer-11.webm.ivf.s52984_r01-05_b6-z.ivf.res
+f77673b566f686853adefe0c578ad251b7241281 vp92-2-20-10bit-yuv420.webm
+abdedfaddacbbe1a15ac7a54e86360f03629fb7a vp92-2-20-10bit-yuv420.webm.md5
+0c2c355a1b17b28537c5a3b19997c8783b69f1af vp92-2-20-12bit-yuv420.webm
+afb2c2798703e039189b0a15c8ac5685aa51d33f vp92-2-20-12bit-yuv420.webm.md5
+0d661bc6e83da33238981481efd1b1802d323d88 vp93-2-20-10bit-yuv422.webm
+10318907063db22eb02fad332556edbbecd443cc vp93-2-20-10bit-yuv422.webm.md5
+ebc6be2f7511a0bdeac0b18c67f84ba7168839c7 vp93-2-20-12bit-yuv422.webm
+235232267c6a1dc8a11e45d600f1c99d2f8b42d4 vp93-2-20-12bit-yuv422.webm.md5
+f76b11b26d4beaceac7a7e7729dd5054d095164f vp93-2-20-10bit-yuv440.webm
+757b33b5ac969c5999999488a731a3d1e6d9fb88 vp93-2-20-10bit-yuv440.webm.md5
+df8807dbd29bec795c2db9c3c18e511fbb988101 vp93-2-20-12bit-yuv440.webm
+ea4100930c3f59a1c23fbb33ab0ea01151cae159 vp93-2-20-12bit-yuv440.webm.md5
+189c1b5f404ff41a50a7fc96341085ad541314a9 vp93-2-20-10bit-yuv444.webm
+2dd0177c2f9d970b6e698892634c653630f91f40 vp93-2-20-10bit-yuv444.webm.md5
+bd44cf6e1c27343e3639df9ac21346aedd5d6973 vp93-2-20-12bit-yuv444.webm
+f36e5bdf5ec3213f32c0ddc82f95d82c5133bf27 vp93-2-20-12bit-yuv444.webm.md5
+eb438c6540eb429f74404eedfa3228d409c57874 desktop_640_360_30.yuv
+89e70ebd22c27d275fe14dc2f1a41841a6d8b9ab kirland_640_480_30.yuv
+33c533192759e5bb4f07abfbac389dc259db4686 macmarcomoving_640_480_30.yuv
+8bfaab121080821b8f03b23467911e59ec59b8fe macmarcostationary_640_480_30.yuv
+70894878d916a599842d9ad0dcd24e10c13e5467 niklas_640_480_30.yuv
+8784b6df2d8cc946195a90ac00540500d2e522e4 tacomanarrows_640_480_30.yuv
+edd86a1f5e62fd9da9a9d46078247759c2638009 tacomasmallcameramovement_640_480_30.yuv
+9a70e8b7d14fba9234d0e51dce876635413ce444 thaloundeskmtg_640_480_30.yuv
+e7d315dbf4f3928779e0dc624311196d44491d32 niklas_1280_720_30.yuv
+c77e4a26616add298a05dd5d12397be22c0e40c5 vp90-2-18-resize.ivf
+c12918cf0a716417fba2de35c3fc5ab90e52dfce vp90-2-18-resize.ivf.md5
+717da707afcaa1f692ff1946f291054eb75a4f06 screendata.y4m
+b7c1296630cdf1a7ef493d15ff4f9eb2999202f6 invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 invalid-vp90-2-08-tile_1x2_frame_parallel.webm.ivf.s47039_r01-05_b6-.ivf.res
+359e138dfb66863828397b77000ea7a83c844d02 invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf
+bbd33de01c17b165b4ce00308e8a19a942023ab8 invalid-vp90-2-08-tile_1x8_frame_parallel.webm.ivf.s288_r01-05_b6-.ivf.res
+fac89b5735be8a86b0dc05159f996a5c3208ae32 invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 invalid-vp90-2-09-aq2.webm.ivf.s3984_r01-05_b6-.v2.ivf.res
+4506dfdcdf8ee4250924b075a0dcf1f070f72e5a invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf
+bcdedaf168ac225575468fda77502d2dc9fd5baa invalid-vp90-2-09-subpixel-00.ivf.s19552_r01-05_b6-.v2.ivf.res
+65e93f9653bcf65b022f7d225268d1a90a76e7bb vp90-2-19-skip.webm
+368dccdde5288c13c25695d2eacdc7402cadf613 vp90-2-19-skip.webm.md5
+ffe460282df2b0e7d4603c2158653ad96f574b02 vp90-2-19-skip-01.webm
+bd21bc9eda4a4a36b221d71ede3a139fc3c7bd85 vp90-2-19-skip-01.webm.md5
+178f5bd239e38cc1cc2657a7a5e1a9f52ad2d3fe vp90-2-19-skip-02.webm
+9020d5e260bd7df08e2b3d4b86f8623cee3daea2 vp90-2-19-skip-02.webm.md5
+b03c408cf23158638da18dbc3323b99a1635c68a invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf
+0a3884edb3fd8f9d9b500223e650f7de257b67d8 invalid-vp90-2-12-droppable_1.ivf.s3676_r01-05_b6-.ivf.res
+5e67e24e7f53fd189e565513cef8519b1bd6c712 invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf
+741158f67c0d9d23726624d06bdc482ad368afc9 invalid-vp90-2-05-resize.ivf.s59293_r01-05_b6-.ivf.res
+8b1f7bf7e86c0976d277f60e8fcd9539e75a079a invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf
+9c6bdf048fb2e66f07d4b4db5b32e6f303bd6109 invalid-vp90-2-09-subpixel-00.ivf.s20492_r01-05_b6-.v2.ivf.res
+552e372e9b78127389fb06b34545df2cec15ba6d invalid-vp91-2-mixedrefcsp-444to420.ivf
+a61774cf03fc584bd9f0904fc145253bb8ea6c4c invalid-vp91-2-mixedrefcsp-444to420.ivf.res
+812d05a64a0d83c1b504d0519927ddc5a2cdb273 invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf
+1e472baaf5f6113459f0399a38a5a5e68d17799d invalid-vp90-2-12-droppable_1.ivf.s73804_r01-05_b6-.ivf.res
+ f97088c7359fc8d3d5aa5eafe57bc7308b3ee124 vp90-2-20-big_superframe-01.webm
+ 47d7d409785afa33b123376de0c907336e6c7bd7 vp90-2-20-big_superframe-01.webm.md5
+ 65ade6d2786209582c50d34cfe22b3cdb033abaf vp90-2-20-big_superframe-02.webm
+ 7c0ed8d04c4d06c5411dd2e5de2411d37f092db5 vp90-2-20-big_superframe-02.webm.md5
+ 667ec8718c982aef6be07eb94f083c2efb9d2d16 vp90-2-07-frame_parallel-1.webm
+ bfc82bf848e9c05020d61e3ffc1e62f25df81d19 vp90-2-07-frame_parallel-1.webm.md5
+ efd5a51d175cfdacd169ed23477729dc558030dc invalid-vp90-2-07-frame_parallel-1.webm
+ 9f912712ec418be69adb910e2ca886a63c4cec08 invalid-vp90-2-07-frame_parallel-2.webm
-445f5a53ca9555341852997ccdd480a51540bd14 invalid-vp90-2-07-frame_parallel-3.webm
++445f5a53ca9555341852997ccdd480a51540bd14 invalid-vp90-2-07-frame_parallel-3.webm
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += cq_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP8_ENCODER) += keyframe_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += byte_alignment_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += external_frame_buffer_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += invalid_file_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += user_priv_test.cc
+ LIBVPX_TEST_SRCS-$(CONFIG_VP9_DECODER) += vp9_frame_parallel_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += active_map_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += borders_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += cpu_speed_test.cc
namespace {
-typedef std::tr1::tuple<int, int, const char *> DecodeParam;
+ enum DecodeMode {
+ kSerialMode,
+ kFrameParallMode
+ };
+
+ const int kDecodeMode = 0;
+ const int kThreads = 1;
+ const int kFileName = 2;
+
++typedef std::tr1::tuple<int, int, const char*> DecodeParam;
+
class TestVectorTest : public ::libvpx_test::DecoderTest,
- public ::libvpx_test::CodecTestWithParam<const char*> {
+ public ::libvpx_test::CodecTestWithParam<DecodeParam> {
protected:
- TestVectorTest() : DecoderTest(GET_PARAM(0)), md5_file_(NULL) {}
+ TestVectorTest()
+ : DecoderTest(GET_PARAM(0)),
+ md5_file_(NULL) {
+ }
virtual ~TestVectorTest() {
if (md5_file_)
"vp90-2-14-resize-fp-tiles-8-16.webm", "vp90-2-14-resize-fp-tiles-8-1.webm",
"vp90-2-14-resize-fp-tiles-8-2.webm", "vp90-2-14-resize-fp-tiles-8-4.webm",
"vp90-2-15-segkey.webm", "vp90-2-15-segkey_adpq.webm",
+ "vp90-2-16-intra-only.webm", "vp90-2-17-show-existing-frame.webm",
+ "vp90-2-18-resize.ivf", "vp90-2-19-skip.webm",
+ "vp90-2-19-skip-01.webm", "vp90-2-19-skip-02.webm",
"vp91-2-04-yuv444.webm",
+ "vp91-2-04-yuv422.webm", "vp91-2-04-yuv440.webm",
+#if CONFIG_VP9_HIGHBITDEPTH
+ "vp92-2-20-10bit-yuv420.webm", "vp92-2-20-12bit-yuv420.webm",
+ "vp93-2-20-10bit-yuv422.webm", "vp93-2-20-12bit-yuv422.webm",
+ "vp93-2-20-10bit-yuv440.webm", "vp93-2-20-12bit-yuv440.webm",
+ "vp93-2-20-10bit-yuv444.webm", "vp93-2-20-12bit-yuv444.webm",
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ "vp90-2-20-big_superframe-01.webm", "vp90-2-20-big_superframe-02.webm",
};
const int kNumVP9TestVectors = NELEMENTS(kVP9TestVectors);
#endif // CONFIG_VP9_DECODER
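As an illustration of the tuple-based parameterization added above, here is a minimal sketch (assuming the test file's existing includes; ConfigureDecoder is a hypothetical helper, not part of libvpx) of how the (mode, threads, filename) fields could be mapped onto decoder settings:

// Sketch only: map a DecodeParam onto decoder settings.
static void ConfigureDecoder(const DecodeParam &input,
                             vpx_codec_dec_cfg_t *cfg,
                             vpx_codec_flags_t *flags) {
  *cfg = vpx_codec_dec_cfg_t();
  *flags = 0;
  // kThreads/kDecodeMode/kFileName index the tuple fields declared above.
  cfg->threads = std::tr1::get<kThreads>(input);
  if (std::tr1::get<kDecodeMode>(input) == kFrameParallMode)
    *flags |= VPX_CODEC_USE_FRAME_THREADING;  // frame-based threading
  // std::tr1::get<kFileName>(input) names the test vector to decode.
}

The actual test builds its decoder from these values inside the fixture; the helper above only shows the intended mapping.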
--- /dev/null
- TEST_P(Vp9EncoderParmsGetToDecoder, BitstreamParms) {
+/*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/codec_factory.h"
+#include "test/encode_test_driver.h"
+#include "test/y4m_video_source.h"
+#include "test/yuv_video_source.h"
+#include "test/util.h"
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "vp9/decoder/vp9_decoder.h"
+
+typedef vpx_codec_stream_info_t vp9_stream_info_t;
+struct vpx_codec_alg_priv {
+ vpx_codec_priv_t base;
+ vpx_codec_dec_cfg_t cfg;
+ vp9_stream_info_t si;
+ struct VP9Decoder *pbi;
+ int postproc_cfg_set;
+ vp8_postproc_cfg_t postproc_cfg;
+ vpx_decrypt_cb decrypt_cb;
+ void *decrypt_state;
+ vpx_image_t img;
+ int img_avail;
+ int flushed;
+ int invert_tile_order;
+ int frame_parallel_decode;
+
+ // External frame buffer info to save for VP9 common.
+ void *ext_priv; // Private data associated with the external frame buffers.
+ vpx_get_frame_buffer_cb_fn_t get_ext_fb_cb;
+ vpx_release_frame_buffer_cb_fn_t release_ext_fb_cb;
+};
+
+static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) {
+ return (vpx_codec_alg_priv_t *)ctx->priv;
+}
+
+namespace {
+
+const unsigned int kFramerate = 50;
+const int kCpuUsed = 2;
+
+struct EncodePerfTestVideo {
+ const char *name;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bitrate;
+ int frames;
+};
+
+const EncodePerfTestVideo kVP9EncodePerfTestVectors[] = {
+ {"niklas_1280_720_30.yuv", 1280, 720, 600, 10},
+};
+
+struct EncodeParameters {
+ int32_t tile_rows;
+ int32_t tile_cols;
+ int32_t lossless;
+ int32_t error_resilient;
+ int32_t frame_parallel;
+ vpx_color_space_t cs;
+ // TODO(JBB): quantizers / bitrate
+};
+
+const EncodeParameters kVP9EncodeParameterSet[] = {
+ {0, 0, 0, 1, 0, VPX_CS_BT_601},
+ {0, 0, 0, 0, 0, VPX_CS_BT_709},
+ {0, 0, 1, 0, 0, VPX_CS_BT_2020},
+ {0, 2, 0, 0, 1, VPX_CS_UNKNOWN},
+ // TODO(JBB): Test profiles (requires more work).
+};
+
+int is_extension_y4m(const char *filename) {
+ const char *dot = strrchr(filename, '.');
+ if (!dot || dot == filename)
+ return 0;
+ else
+ return !strcmp(dot, ".y4m");
+}
+
+class Vp9EncoderParmsGetToDecoder
+ : public ::libvpx_test::EncoderTest,
+ public ::libvpx_test::CodecTestWith2Params<EncodeParameters, \
+ EncodePerfTestVideo> {
+ protected:
+ Vp9EncoderParmsGetToDecoder()
+ : EncoderTest(GET_PARAM(0)),
+ encode_parms(GET_PARAM(1)) {
+ }
+
+ virtual ~Vp9EncoderParmsGetToDecoder() {}
+
+ virtual void SetUp() {
+ InitializeConfig();
+ SetMode(::libvpx_test::kTwoPassGood);
+ cfg_.g_lag_in_frames = 25;
+ cfg_.g_error_resilient = encode_parms.error_resilient;
+ dec_cfg_.threads = 4;
+ test_video_ = GET_PARAM(2);
+ cfg_.rc_target_bitrate = test_video_.bitrate;
+ }
+
+ virtual void PreEncodeFrameHook(::libvpx_test::VideoSource *video,
+ ::libvpx_test::Encoder *encoder) {
+ if (video->frame() == 1) {
+ encoder->Control(VP9E_SET_COLOR_SPACE, encode_parms.cs);
+ encoder->Control(VP9E_SET_LOSSLESS, encode_parms.lossless);
+ encoder->Control(VP9E_SET_FRAME_PARALLEL_DECODING,
+ encode_parms.frame_parallel);
+ encoder->Control(VP9E_SET_TILE_ROWS, encode_parms.tile_rows);
+ encoder->Control(VP9E_SET_TILE_COLUMNS, encode_parms.tile_cols);
+ encoder->Control(VP8E_SET_CPUUSED, kCpuUsed);
+ encoder->Control(VP8E_SET_ENABLEAUTOALTREF, 1);
+ encoder->Control(VP8E_SET_ARNR_MAXFRAMES, 7);
+ encoder->Control(VP8E_SET_ARNR_STRENGTH, 5);
+ encoder->Control(VP8E_SET_ARNR_TYPE, 3);
+ }
+ }
+
+ virtual bool HandleDecodeResult(const vpx_codec_err_t res_dec,
+ const libvpx_test::VideoSource& video,
+ libvpx_test::Decoder *decoder) {
+ vpx_codec_ctx_t* vp9_decoder = decoder->GetDecoder();
+ vpx_codec_alg_priv_t* priv =
+ (vpx_codec_alg_priv_t*) get_alg_priv(vp9_decoder);
+
+ VP9Decoder* pbi = priv->pbi;
+ VP9_COMMON* common = &pbi->common;
+
+ if (encode_parms.lossless) {
+ EXPECT_EQ(common->base_qindex, 0);
+ EXPECT_EQ(common->y_dc_delta_q, 0);
+ EXPECT_EQ(common->uv_dc_delta_q, 0);
+ EXPECT_EQ(common->uv_ac_delta_q, 0);
+ EXPECT_EQ(common->tx_mode, ONLY_4X4);
+ }
+ EXPECT_EQ(common->error_resilient_mode, encode_parms.error_resilient);
+ if (encode_parms.error_resilient) {
+ EXPECT_EQ(common->frame_parallel_decoding_mode, 1);
+ EXPECT_EQ(common->use_prev_frame_mvs, 0);
+ } else {
+ EXPECT_EQ(common->frame_parallel_decoding_mode,
+ encode_parms.frame_parallel);
+ }
+ EXPECT_EQ(common->color_space, encode_parms.cs);
+ EXPECT_EQ(common->log2_tile_cols, encode_parms.tile_cols);
+ EXPECT_EQ(common->log2_tile_rows, encode_parms.tile_rows);
+
+ EXPECT_EQ(VPX_CODEC_OK, res_dec) << decoder->DecodeError();
+ return VPX_CODEC_OK == res_dec;
+ }
+
+ EncodePerfTestVideo test_video_;
+
+ private:
+ EncodeParameters encode_parms;
+};
+
++// TODO(hkuang): This test conflicts with frame parallel decode, so it is
++// disabled for now until that is fixed.
++TEST_P(Vp9EncoderParmsGetToDecoder, DISABLED_BitstreamParms) {
+ init_flags_ = VPX_CODEC_USE_PSNR;
+
+ libvpx_test::VideoSource *video;
+ if (is_extension_y4m(test_video_.name)) {
+ video = new libvpx_test::Y4mVideoSource(test_video_.name,
+ 0, test_video_.frames);
+ } else {
+ video = new libvpx_test::YUVVideoSource(test_video_.name,
+ VPX_IMG_FMT_I420,
+ test_video_.width,
+ test_video_.height,
+ kFramerate, 1, 0,
+ test_video_.frames);
+ }
+
+ ASSERT_NO_FATAL_FAILURE(RunLoop(video));
+ delete(video);
+}
+
+VP9_INSTANTIATE_TEST_CASE(
+ Vp9EncoderParmsGetToDecoder,
+ ::testing::ValuesIn(kVP9EncodeParameterSet),
+ ::testing::ValuesIn(kVP9EncodePerfTestVectors));
+
+} // namespace
--- /dev/null
- // one key frame for every ten frames. The 13th frame has corrupted data.
+ /*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+ #include <cstdio>
+ #include <cstdlib>
+ #include <string>
+ #include "third_party/googletest/src/include/gtest/gtest.h"
+ #include "./vpx_config.h"
+ #include "test/codec_factory.h"
+ #include "test/decode_test_driver.h"
+ #include "test/ivf_video_source.h"
+ #include "test/md5_helper.h"
+ #include "test/util.h"
+ #if CONFIG_WEBM_IO
+ #include "test/webm_video_source.h"
+ #endif
+ #include "vpx_mem/vpx_mem.h"
+
+ namespace {
+
+ using std::string;
+
+ #if CONFIG_WEBM_IO
+
+ struct FileList {
+ const char *name;
+ // md5 sum of the decoded frames, not including skipped frames.
+ const char *expected_md5;
+ const int pause_frame_num;
+ };
+
+ // Decodes |filename| with |num_threads|. Pauses at the specified frame_num,
+ // seeks to the next key frame and then continues decoding until the end.
+ // Returns the md5 of the decoded frames, not including skipped frames.
+ string DecodeFile(const string &filename, int num_threads, int pause_num) {
+ libvpx_test::WebMVideoSource video(filename);
+ video.Init();
+ int in_frames = 0;
+ int out_frames = 0;
+
+ vpx_codec_dec_cfg_t cfg = {0};
+ cfg.threads = num_threads;
+ vpx_codec_flags_t flags = 0;
+ flags |= VPX_CODEC_USE_FRAME_THREADING;
+ libvpx_test::VP9Decoder decoder(cfg, flags, 0);
+
+ libvpx_test::MD5 md5;
+ video.Begin();
+
+ do {
+ ++in_frames;
+ const vpx_codec_err_t res =
+ decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ if (res != VPX_CODEC_OK) {
+ EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+ break;
+ }
+
+ // Pause at specified frame number.
+ if (in_frames == pause_num) {
+ // Flush the decoder and then seek to next key frame.
+ decoder.DecodeFrame(NULL, 0);
+ video.SeekToNextKeyFrame();
+ } else {
+ video.Next();
+ }
+
+ // Flush the decoder at the end of the video.
+ if (!video.cxdata())
+ decoder.DecodeFrame(NULL, 0);
+
+ libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
+ const vpx_image_t *img;
+
+ // Get decompressed data
+ while ((img = dec_iter.Next())) {
+ ++out_frames;
+ md5.Add(img);
+ }
+ } while (video.cxdata() != NULL);
+
+ EXPECT_EQ(in_frames, out_frames) <<
+ "Input frame count does not match output frame count";
+
+ return string(md5.Get());
+ }
+
+ void DecodeFiles(const FileList files[]) {
+ for (const FileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5,
+ DecodeFile(iter->name, t, iter->pause_frame_num))
+ << "threads = " << t;
+ }
+ }
+ }
+
+ TEST(VP9MultiThreadedFrameParallel, PauseSeekResume) {
+ // vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
+ // one key frame for every ten frames.
+ static const FileList files[] = {
+ { "vp90-2-07-frame_parallel-1.webm",
+ "6ea7c3875d67252e7caf2bc6e75b36b1", 6},
+ { "vp90-2-07-frame_parallel-1.webm",
+ "4bb634160c7356a8d7d4299b6dc83a45", 12},
+ { "vp90-2-07-frame_parallel-1.webm",
+ "89772591e6ef461f9fa754f916c78ed8", 26},
+ { NULL, NULL, 0},
+ };
+ DecodeFiles(files);
+ }
+
+ struct InvalidFileList {
+ const char *name;
+ // md5 sum of the decoded frames, not including corrupted frames.
+ const char *expected_md5;
+ // Expected number of decoded frames, not including corrupted frames.
+ const int expected_frame_count;
+ };
+
+ // Decodes |filename| with |num_threads|. Returns the md5 of the decoded
+ // frames, not including corrupted frames.
+ string DecodeInvalidFile(const string &filename, int num_threads,
+ int expected_frame_count) {
+ libvpx_test::WebMVideoSource video(filename);
+ video.Init();
+
+ vpx_codec_dec_cfg_t cfg = vpx_codec_dec_cfg_t();
+ cfg.threads = num_threads;
+ const vpx_codec_flags_t flags = VPX_CODEC_USE_FRAME_THREADING;
+ libvpx_test::VP9Decoder decoder(cfg, flags, 0);
+
+ libvpx_test::MD5 md5;
+ video.Begin();
+
+ int out_frames = 0;
+ do {
+ const vpx_codec_err_t res =
+ decoder.DecodeFrame(video.cxdata(), video.frame_size());
+ // TODO(hkuang): frame parallel mode should return an error on corruption.
+ if (res != VPX_CODEC_OK) {
+ EXPECT_EQ(VPX_CODEC_OK, res) << decoder.DecodeError();
+ break;
+ }
+
+ video.Next();
+
+ // Flush the decoder at the end of the video.
+ if (!video.cxdata())
+ decoder.DecodeFrame(NULL, 0);
+
+ libvpx_test::DxDataIterator dec_iter = decoder.GetDxData();
+ const vpx_image_t *img;
+
+ // Get decompressed data
+ while ((img = dec_iter.Next())) {
+ ++out_frames;
+ md5.Add(img);
+ }
+ } while (video.cxdata() != NULL);
+
+ EXPECT_EQ(expected_frame_count, out_frames) <<
+ "Input frame count does not match expected output frame count";
+
+ return string(md5.Get());
+ }
+
+ void DecodeInvalidFiles(const InvalidFileList files[]) {
+ for (const InvalidFileList *iter = files; iter->name != NULL; ++iter) {
+ SCOPED_TRACE(iter->name);
+ for (int t = 2; t <= 8; ++t) {
+ EXPECT_EQ(iter->expected_md5,
+ DecodeInvalidFile(iter->name, t, iter->expected_frame_count))
+ << "threads = " << t;
+ }
+ }
+ }
+
+ TEST(VP9MultiThreadedFrameParallel, InvalidFileTest) {
+ static const InvalidFileList files[] = {
+ // invalid-vp90-2-07-frame_parallel-1.webm is a 40 frame video file with
+ // one key frame for every ten frames. The 11th frame has corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-1.webm",
+ "0549d0f45f60deaef8eb708e6c0eb6cb", 30},
+ // invalid-vp90-2-07-frame_parallel-2.webm is a 40 frame video file with
+ // one key frame for every ten frames. The 1st and 31st frames have
+ // corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-2.webm",
+ "6a1f3cf6f9e7a364212fadb9580d525e", 20},
+ // invalid-vp90-2-07-frame_parallel-3.webm is a 40 frame video file with
- "a567c8259d27ad32b1b7f58db5ac89dd", 32},
++ // one key frame for every ten frames. The 5th and 13th frames have
++ // corrupted data.
+ { "invalid-vp90-2-07-frame_parallel-3.webm",
++ "8256544308de926b0681e04685b98677", 27},
+ { NULL, NULL, 0},
+ };
+ DecodeInvalidFiles(files);
+ }
+
+ #endif // CONFIG_WEBM_IO
+ } // namespace
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_systemdependent.h"
-static INLINE void alloc_mi_array(VP9_COMMON *cm, int mi_size, int idx) {
- CHECK_MEM_ERROR(cm, cm->mip_array[idx],
- vpx_calloc(mi_size, sizeof(*cm->mip_array[0])));
- CHECK_MEM_ERROR(cm, cm->mi_grid_base_array[idx],
- vpx_calloc(mi_size, sizeof(*cm->mi_grid_base_array[0])));
-}
-
-static void clear_mi_border(const VP9_COMMON *cm, MODE_INFO *mi) {
- int i;
-
- // Top border row
- vpx_memset(mi, 0, sizeof(*mi) * cm->mi_stride);
-
- // Left border column
- for (i = 1; i < cm->mi_rows + 1; ++i)
- vpx_memset(&mi[i * cm->mi_stride], 0, sizeof(*mi));
-}
+ // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
+ // frame reference count.
+ void lock_buffer_pool(BufferPool *const pool) {
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&pool->pool_mutex);
+ #else
+ (void)pool;
+ #endif
+ }
+
+ void unlock_buffer_pool(BufferPool *const pool) {
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&pool->pool_mutex);
+ #else
+ (void)pool;
+ #endif
+ }
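For context, a minimal usage sketch (release_one_ref is hypothetical and only illustrates the intended locking discipline) showing how callers are expected to bracket reference-count updates on the shared pool:

/* Sketch only: guard ref_count updates on the shared BufferPool. */
static void release_one_ref(VP9_COMMON *cm, int idx) {
  BufferPool *const pool = cm->buffer_pool;
  lock_buffer_pool(pool);  /* compiles to a no-op without CONFIG_MULTITHREAD */
  if (pool->frame_bufs[idx].ref_count > 0)
    --pool->frame_bufs[idx].ref_count;
  unlock_buffer_pool(pool);
}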
+
+void vp9_set_mb_mi(VP9_COMMON *cm, int width, int height) {
+ const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
+ const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
-static void set_mb_mi(VP9_COMMON *cm, int aligned_width, int aligned_height) {
cm->mi_cols = aligned_width >> MI_SIZE_LOG2;
cm->mi_rows = aligned_height >> MI_SIZE_LOG2;
- cm->mi_stride = cm->mi_cols + MI_BLOCK_SIZE;
+ cm->mi_stride = calc_mi_size(cm->mi_cols);
cm->mb_cols = (cm->mi_cols + 1) >> 1;
cm->mb_rows = (cm->mi_rows + 1) >> 1;
cm->MBs = cm->mb_rows * cm->mb_cols;
}
-static void setup_mi(VP9_COMMON *cm) {
- cm->mi = cm->mip + cm->mi_stride + 1;
- cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
- cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
- cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
-
- vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
-
- vpx_memset(cm->mi_grid_base, 0, cm->mi_stride * (cm->mi_rows + 1) *
- sizeof(*cm->mi_grid_base));
-
- // Only clear mi border in non frame-parallel decode. In frame-parallel
- // decode, prev_mip is managed by previous decoding thread. While in
- // non frame-parallel decode, prev_mip and mip are both managed by
- // current decoding thread.
- if (!cm->frame_parallel_decode)
- clear_mi_border(cm, cm->prev_mip);
-}
-
-static int alloc_mi(VP9_COMMON *cm, int mi_size) {
- int i;
-
- for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
- // Delay reallocation as another thread is accessing prev_mi.
- if (cm->frame_parallel_decode && i == cm->prev_mi_idx) {
- cm->update_prev_mi = 1;
- continue;
- }
- alloc_mi_array(cm, mi_size, i);
- }
-
- cm->mip = cm->mip_array[cm->mi_idx];
- cm->mi_grid_base = cm->mi_grid_base_array[cm->mi_idx];
-
- if (!cm->frame_parallel_decode) {
- cm->mi_idx = 0;
- cm->prev_mi_idx = 1;
- // In frame-parallel decode, prev_mip comes from another thread,
- // so current decoding thread should not touch it.
- cm->prev_mip = cm->mip_array[cm->prev_mi_idx];
- cm->prev_mi_grid_base = cm->mi_grid_base_array[cm->prev_mi_idx];
- }
-
- return 0;
-}
-
-static void free_mi(VP9_COMMON *cm, int decode_done) {
- int i;
-
- for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
- if (cm->frame_parallel_decode && i == cm->prev_mi_idx && !decode_done)
- continue;
- vpx_free(cm->mip_array[i]);
- cm->mip_array[i] = NULL;
- vpx_free(cm->mi_grid_base_array[i]);
- cm->mi_grid_base_array[i] = NULL;
- }
-
- cm->mip = NULL;
- cm->mi_grid_base = NULL;
-
- if (!cm->frame_parallel_decode) {
- cm->prev_mip = NULL;
- cm->prev_mi_grid_base = NULL;
- }
-}
-
+ static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ cm->seg_map_array[i] = (uint8_t *)vpx_calloc(seg_map_size, 1);
+ if (cm->seg_map_array[i] == NULL)
+ return 1;
+ }
+
+ // Init the index.
+ cm->seg_map_idx = 0;
+ cm->prev_seg_map_idx = 1;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+
+ if (!cm->frame_parallel_decode) {
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+ }
+
+ return 0;
+ }
+
+ static void free_seg_map(VP9_COMMON *cm) {
+ int i;
+
+ for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
+ vpx_free(cm->seg_map_array[i]);
+ cm->seg_map_array[i] = NULL;
+ }
+
+ cm->current_frame_seg_map = NULL;
+
+ if (!cm->frame_parallel_decode) {
+ cm->last_frame_seg_map = NULL;
+ }
+ }
+
-void vp9_free_frame_buffers(VP9_COMMON *cm) {
- int i;
+void vp9_free_ref_frame_buffers(VP9_COMMON *cm) {
+ BufferPool *const pool = cm->buffer_pool;
+ int i;
for (i = 0; i < FRAME_BUFFERS; ++i) {
- if (cm->frame_bufs[i].ref_count > 0 &&
- cm->frame_bufs[i].raw_frame_buffer.data != NULL) {
- cm->release_fb_cb(cm->cb_priv, &cm->frame_bufs[i].raw_frame_buffer);
- cm->frame_bufs[i].ref_count = 0;
- vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
-
+ if (pool->frame_bufs[i].ref_count > 0 &&
+ pool->frame_bufs[i].raw_frame_buffer.data != NULL) {
+ pool->release_fb_cb(pool->cb_priv, &pool->frame_bufs[i].raw_frame_buffer);
+ pool->frame_bufs[i].ref_count = 0;
}
- vpx_free(cm->frame_bufs[i].mvs);
- cm->frame_bufs[i].mvs = NULL;
- vp9_free_frame_buffer(&cm->frame_bufs[i].buf);
++ vpx_free(pool->frame_bufs[i].mvs);
++ pool->frame_bufs[i].mvs = NULL;
++ vp9_free_frame_buffer(&pool->frame_bufs[i].buf);
}
+#if CONFIG_VP9_POSTPROC
vp9_free_frame_buffer(&cm->post_proc_buffer);
+ vp9_free_frame_buffer(&cm->post_proc_buffer_int);
+#endif
}
void vp9_free_context_buffers(VP9_COMMON *cm) {
- free_mi(cm, 1);
+ cm->free_mi(cm);
- vpx_free(cm->last_frame_seg_map);
- cm->last_frame_seg_map = NULL;
+ free_seg_map(cm);
-
vpx_free(cm->above_context);
cm->above_context = NULL;
-
vpx_free(cm->above_seg_context);
cm->above_seg_context = NULL;
}
-int vp9_resize_frame_buffers(VP9_COMMON *cm, int width, int height) {
- const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
- const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
-#if CONFIG_INTERNAL_STATS || CONFIG_VP9_POSTPROC
- const int ss_x = cm->subsampling_x;
- const int ss_y = cm->subsampling_y;
-
- // TODO(agrange): this should be conditionally allocated.
- if (vp9_realloc_frame_buffer(&cm->post_proc_buffer, width, height, ss_x, ss_y,
- VP9_DEC_BORDER_IN_PIXELS, NULL, NULL, NULL) < 0)
- goto fail;
-#endif
-
- set_mb_mi(cm, aligned_width, aligned_height);
+int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
+ vp9_free_context_buffers(cm);
- free_mi(cm, 0);
- if (alloc_mi(cm, cm->mi_stride * (cm->mi_rows + MI_BLOCK_SIZE)))
+ vp9_set_mb_mi(cm, width, height);
+ if (cm->alloc_mi(cm, cm->mi_stride * calc_mi_size(cm->mi_rows)))
goto fail;
- cm->last_frame_seg_map = (uint8_t *)vpx_calloc(cm->mi_rows * cm->mi_cols, 1);
- if (!cm->last_frame_seg_map) goto fail;
- setup_mi(cm);
-
+ // Create the segmentation map structure and set to 0.
+ free_seg_map(cm);
+ if (alloc_seg_map(cm, cm->mi_rows * cm->mi_cols))
+ goto fail;
- vpx_free(cm->above_context);
- cm->above_context =
- (ENTROPY_CONTEXT *)vpx_calloc(2 * mi_cols_aligned_to_sb(cm->mi_cols) *
- MAX_MB_PLANE,
- sizeof(*cm->above_context));
- if (!cm->above_context)
- goto fail;
+ cm->above_context = (ENTROPY_CONTEXT *)vpx_calloc(
+ 2 * mi_cols_aligned_to_sb(cm->mi_cols) * MAX_MB_PLANE,
+ sizeof(*cm->above_context));
+ if (!cm->above_context) goto fail;
- vpx_free(cm->above_seg_context);
- cm->above_seg_context =
- (PARTITION_CONTEXT *)vpx_calloc(mi_cols_aligned_to_sb(cm->mi_cols),
- sizeof(*cm->above_seg_context));
- if (!cm->above_seg_context)
- goto fail;
+ cm->above_seg_context = (PARTITION_CONTEXT *)vpx_calloc(
+ mi_cols_aligned_to_sb(cm->mi_cols), sizeof(*cm->above_seg_context));
+ if (!cm->above_seg_context) goto fail;
return 0;
}
static void init_frame_bufs(VP9_COMMON *cm) {
++ BufferPool *const pool = cm->buffer_pool;
int i;
- RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
cm->new_fb_idx = FRAME_BUFFERS - 1;
- cm->frame_bufs[cm->new_fb_idx].ref_count = 1;
- frame_bufs[cm->new_fb_idx].ref_count = 1;
++ pool->frame_bufs[cm->new_fb_idx].ref_count = 1;
for (i = 0; i < REF_FRAMES; ++i) {
cm->ref_frame_map[i] = i;
- cm->frame_bufs[i].ref_count = 1;
- frame_bufs[i].ref_count = 1;
++ pool->frame_bufs[i].ref_count = 1;
}
}
int i;
const int ss_x = cm->subsampling_x;
const int ss_y = cm->subsampling_y;
- RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
- vp9_free_frame_buffers(cm);
+ vp9_free_ref_frame_buffers(cm);
for (i = 0; i < FRAME_BUFFERS; ++i) {
- cm->frame_bufs[i].ref_count = 0;
- if (vp9_alloc_frame_buffer(&cm->frame_bufs[i].buf, width, height,
- frame_bufs[i].ref_count = 0;
- if (vp9_alloc_frame_buffer(&frame_bufs[i].buf, width, height,
- ss_x, ss_y, VP9_ENC_BORDER_IN_PIXELS) < 0)
++ BufferPool *const pool = cm->buffer_pool;
++ pool->frame_bufs[i].ref_count = 0;
++ if (vp9_alloc_frame_buffer(&pool->frame_bufs[i].buf, width, height,
+ ss_x, ss_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_ENC_BORDER_IN_PIXELS,
+ cm->byte_alignment) < 0)
goto fail;
- if (cm->frame_bufs[i].mvs == NULL) {
- cm->frame_bufs[i].mvs =
++ if (pool->frame_bufs[i].mvs == NULL) {
++ pool->frame_bufs[i].mvs =
+ (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->frame_bufs[i].mvs));
- if (cm->frame_bufs[i].mvs == NULL)
++ sizeof(*pool->frame_bufs[i].mvs));
++ if (pool->frame_bufs[i].mvs == NULL)
+ goto fail;
+
- cm->frame_bufs[i].mi_rows = cm->mi_rows;
- cm->frame_bufs[i].mi_cols = cm->mi_cols;
++ pool->frame_bufs[i].mi_rows = cm->mi_rows;
++ pool->frame_bufs[i].mi_cols = cm->mi_cols;
+ }
}
init_frame_bufs(cm);
}
void vp9_remove_common(VP9_COMMON *cm) {
- vp9_free_frame_buffers(cm);
+ vp9_free_ref_frame_buffers(cm);
vp9_free_context_buffers(cm);
- vp9_free_internal_frame_buffers(&cm->int_frame_buffers);
-}
-
-void vp9_update_frame_size(VP9_COMMON *cm) {
- const int aligned_width = ALIGN_POWER_OF_TWO(cm->width, MI_SIZE_LOG2);
- const int aligned_height = ALIGN_POWER_OF_TWO(cm->height, MI_SIZE_LOG2);
-
- set_mb_mi(cm, aligned_width, aligned_height);
- setup_mi(cm);
- // Initialize the previous frame segment map to 0.
- if (cm->current_frame_seg_map)
- vpx_memset(cm->current_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
+ vpx_free(cm->fc);
+ cm->fc = NULL;
+ vpx_free(cm->frame_contexts);
+ cm->frame_contexts = NULL;
}
-void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
- // Swap indices.
- const int tmp = cm->mi_idx;
-
- // Only used in frame parallel decode: Update the prev_mi buffer if
- // needed. The worker that was accessing it must already finish decoding.
- // So it can be resized safely now.
- if (cm->update_prev_mi) {
- const int mi_size = cm->mi_stride * (cm->mi_rows + MI_BLOCK_SIZE);
- vpx_free(cm->mip_array[cm->prev_mi_idx]);
- vpx_free(cm->mi_grid_base_array[cm->prev_mi_idx]);
- cm->mip_array[cm->prev_mi_idx] = NULL;
- cm->mi_grid_base_array[cm->prev_mi_idx] = NULL;
- alloc_mi_array(cm, mi_size, cm->prev_mi_idx);
- cm->update_prev_mi = 0;
- }
-
- cm->mi_idx = cm->prev_mi_idx;
- cm->prev_mi_idx = tmp;
-
- // Current mip will be the prev_mip for the next frame.
- cm->mip = cm->mip_array[cm->mi_idx];
- cm->prev_mip = cm->mip_array[cm->prev_mi_idx];
- cm->mi_grid_base = cm->mi_grid_base_array[cm->mi_idx];
- cm->prev_mi_grid_base = cm->mi_grid_base_array[cm->prev_mi_idx];
-
- // Update the upper left visible macroblock ptrs.
- cm->mi = cm->mip + cm->mi_stride + 1;
- cm->prev_mi = cm->prev_mip + cm->mi_stride + 1;
- cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
- cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
+void vp9_init_context_buffers(VP9_COMMON *cm) {
+ cm->setup_mi(cm);
+ if (cm->last_frame_seg_map)
+ vpx_memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
}
+
+ void vp9_swap_current_and_last_seg_map(VP9_COMMON *cm) {
+ // Swap indices.
+ const int tmp = cm->seg_map_idx;
+ cm->seg_map_idx = cm->prev_seg_map_idx;
+ cm->prev_seg_map_idx = tmp;
+
+ cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
+ cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
+ }
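As an illustration of the ping-pong segmentation maps set up above, a sketch of one way they could be cycled once per decoded frame (decode_one_frame is a placeholder; the real call sites and conditions live in the decoder and are not shown in this hunk):

/* Sketch only: cycle the segmentation maps across frames. */
static void decode_sequence_sketch(VP9_COMMON *cm, int num_frames) {
  int frame;
  for (frame = 0; frame < num_frames; ++frame) {
    decode_one_frame(cm);  /* hypothetical; fills cm->current_frame_seg_map */
    /* This frame's map becomes last_frame_seg_map for the next frame. */
    vp9_swap_current_and_last_seg_map(cm);
  }
}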
void vp9_remove_common(struct VP9Common *cm);
-int vp9_resize_frame_buffers(struct VP9Common *cm, int width, int height);
-
-int vp9_alloc_frame_buffers(struct VP9Common *cm, int width, int height);
-int vp9_alloc_state_buffers(struct VP9Common *cm, int width, int height);
int vp9_alloc_context_buffers(struct VP9Common *cm, int width, int height);
-
-void vp9_free_frame_buffers(struct VP9Common *cm);
-void vp9_free_state_buffers(struct VP9Common *cm);
+void vp9_init_context_buffers(struct VP9Common *cm);
void vp9_free_context_buffers(struct VP9Common *cm);
-void vp9_update_frame_size(struct VP9Common *cm);
+int vp9_alloc_ref_frame_buffers(struct VP9Common *cm, int width, int height);
+void vp9_free_ref_frame_buffers(struct VP9Common *cm);
+
+int vp9_alloc_state_buffers(struct VP9Common *cm, int width, int height);
+void vp9_free_state_buffers(struct VP9Common *cm);
-void vp9_swap_mi_and_prev_mi(struct VP9Common *cm);
+void vp9_set_mb_mi(struct VP9Common *cm, int width, int height);
+ void vp9_swap_current_and_last_seg_map(struct VP9Common *cm);
+
#ifdef __cplusplus
} // extern "C"
#endif
cm->error_resilient_mode || cm->reset_frame_context == 3) {
// Reset all frame contexts.
for (i = 0; i < FRAME_CONTEXTS; ++i)
- cm->frame_contexts[i] = cm->fc;
+ cm->frame_contexts[i] = *cm->fc;
} else if (cm->reset_frame_context == 2) {
// Reset only the frame context specified in the frame header.
- cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+ cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
- if (frame_is_intra_only(cm) && !cm->frame_parallel_decode)
+ // prev_mip will only be allocated in the encoder.
- if (frame_is_intra_only(cm) && cm->prev_mip)
++ if (frame_is_intra_only(cm) && cm->prev_mip && !cm->frame_parallel_decode)
vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
sizeof(*cm->prev_mip));
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
- int block, int mi_row, int mi_col) {
+ int block, int mi_row, int mi_col,
+ find_mv_refs_sync sync, void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
- MODE_INFO *prev_mi = NULL;
- MB_MODE_INFO *prev_mbmi = NULL;
-
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
-
int different_ref_found = 0;
int context_counter = 0;
+ const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
+ cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
// Blank the reference vector list
vpx_memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
}
}
- prev_mi = cm->coding_use_prev_mi && cm->prev_mi ?
- cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col] : NULL;
- prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL;
+ // Synchronize here for frame parallel decode if sync function is provided.
+ if (sync != NULL) {
+ sync(data, mi_row);
+ }
+
// Check the last frame's mode and mv info.
- if (prev_mbmi) {
- if (prev_mbmi->ref_frame[0] == ref_frame)
- ADD_MV_REF_LIST(prev_mbmi->mv[0]);
- else if (prev_mbmi->ref_frame[1] == ref_frame)
- ADD_MV_REF_LIST(prev_mbmi->mv[1]);
+ if (cm->use_prev_frame_mvs) {
+ if (prev_frame_mvs->ref_frame[0] == ref_frame) {
+ ADD_MV_REF_LIST(prev_frame_mvs->mv[0], refmv_count, mv_ref_list, Done);
+ } else if (prev_frame_mvs->ref_frame[1] == ref_frame) {
+ ADD_MV_REF_LIST(prev_frame_mvs->mv[1], refmv_count, mv_ref_list, Done);
+ }
}
// Since we couldn't find 2 mvs from the same reference frame
typedef struct {
int ref_count;
+ MV_REF *mvs;
+ int mi_rows;
+ int mi_cols;
vpx_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;
+
+ // The following variables will only be used in frame parallel decode.
+
+ // frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
+ // that no FrameWorker owns, or is decoding, this buffer.
+ VP9Worker *frame_worker_owner;
+
+ // row and col indicate the position to which the frame has been decoded, in
+ // real pixel units. They are reset to -1 when decoding begins and set to
+ // INT_MAX when the frame is fully decoded.
+ int row;
+ int col;
} RefCntBuffer;
+ typedef struct {
+ // Protect BufferPool from being accessed by several FrameWorkers at
+ // the same time during frame parallel decode.
+ // TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_t pool_mutex;
+ #endif
+
+ // Private data associated with the frame buffer callbacks.
+ void *cb_priv;
+
+ vpx_get_frame_buffer_cb_fn_t get_fb_cb;
+ vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+
+ RefCntBuffer frame_bufs[FRAME_BUFFERS];
+
+ // Frame buffers allocated internally by the codec.
+ InternalFrameBufferList int_frame_buffers;
+ } BufferPool;
+
typedef struct VP9Common {
struct vpx_internal_error_info error;
int subsampling_x;
int subsampling_y;
+#if CONFIG_VP9_HIGHBITDEPTH
+ int use_highbitdepth; // Marks if we need to use 16bit frame buffers.
+#endif
+
YV12_BUFFER_CONFIG *frame_to_show;
- RefCntBuffer frame_bufs[FRAME_BUFFERS];
+ RefCntBuffer *prev_frame;
+
+ // TODO(hkuang): Combine this with cur_buf in macroblockd.
+ RefCntBuffer *cur_frame;
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
MODE_INFO *prev_mip; /* MODE_INFO array 'mip' from last decoded frame */
MODE_INFO *prev_mi; /* 'mi' from last frame (points into prev_mip) */
- MODE_INFO **mi_grid_base;
- MODE_INFO **mi_grid_visible;
- MODE_INFO **prev_mi_grid_base;
- MODE_INFO **prev_mi_grid_visible;
+ // Separate mi functions between encoder and decoder.
+ int (*alloc_mi)(struct VP9Common *cm, int mi_size);
+ void (*free_mi)(struct VP9Common *cm);
+ void (*setup_mi)(struct VP9Common *cm);
+
- // Used in frame parallel decode for delay resizing prev_mi.
- int update_prev_mi;
+ // Whether to use previous frame's motion vectors for prediction.
+ int use_prev_frame_mvs;
// Persistent mb segment id map used in prediction.
- unsigned char *last_frame_seg_map;
+ int seg_map_idx;
+ int prev_seg_map_idx;
+
+ uint8_t *seg_map_array[NUM_PING_PONG_BUFFERS];
+ uint8_t *last_frame_seg_map;
+ uint8_t *current_frame_seg_map;
INTERP_FILTER interp_filter;
struct loopfilter lf;
struct segmentation seg;
+ // TODO(hkuang): Remove this as it is the same as frame_parallel_decode
+ // in pbi.
+ int frame_parallel_decode; // frame-based threading.
+
// Context probabilities for reference frame prediction
- int allow_comp_inter_inter;
MV_REFERENCE_FRAME comp_fixed_ref;
MV_REFERENCE_FRAME comp_var_ref[2];
REFERENCE_MODE reference_mode;
int error_resilient_mode;
int frame_parallel_decoding_mode;
- // Flag indicates if prev_mi can be used in coding:
- // 0: encoder assumes decoder does not have prev_mi
- // 1: encoder assumes decoder has and uses prev_mi
- unsigned int coding_use_prev_mi;
-
int log2_tile_cols, log2_tile_rows;
+ int byte_alignment;
+
+ // Private data associated with the frame buffer callbacks.
+ void *cb_priv;
+ vpx_get_frame_buffer_cb_fn_t get_fb_cb;
+ vpx_release_frame_buffer_cb_fn_t release_fb_cb;
+
+ // Handles memory for the codec.
+ InternalFrameBufferList int_frame_buffers;
+ // External BufferPool passed from outside.
+ BufferPool *buffer_pool;
+
PARTITION_CONTEXT *above_seg_context;
ENTROPY_CONTEXT *above_context;
} VP9_COMMON;
- return &cm->frame_bufs[cm->ref_frame_map[index]].buf;
+ // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
+ // frame reference count.
+ void lock_buffer_pool(BufferPool *const pool);
+ void unlock_buffer_pool(BufferPool *const pool);
+
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP9_COMMON *cm, int index) {
+ if (index < 0 || index >= REF_FRAMES)
+ return NULL;
+ if (cm->ref_frame_map[index] < 0)
+ return NULL;
+ assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
++ return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
+}
+
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
- return &cm->frame_bufs[cm->new_fb_idx].buf;
+ return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
static INLINE int get_free_fb(VP9_COMMON *cm) {
kernel[subpel_x], xs, kernel[subpel_y], ys, w, h);
}
- void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
- uint8_t *dst, int dst_stride,
- const MV *src_mv,
- const struct scale_factors *sf,
- int w, int h, int ref,
- const InterpKernel *kernel,
- enum mv_precision precision,
- int x, int y) {
- const int is_q4 = precision == MV_PRECISION_Q4;
- const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
- is_q4 ? src_mv->col : src_mv->col * 2 };
- MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
- const int subpel_x = mv.col & SUBPEL_MASK;
- const int subpel_y = mv.row & SUBPEL_MASK;
-
- src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
-
- inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
- sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
- }
-
+#if CONFIG_VP9_HIGHBITDEPTH
- static void high_inter_predictor(const uint8_t *src, int src_stride,
++void high_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys, int bd) {
+ sf->highbd_predict[subpel_x != 0][subpel_y != 0][ref](
+ src, src_stride, dst, dst_stride,
+ kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
+}
+
+void vp9_highbd_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ enum mv_precision precision,
+ int x, int y, int bd) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+ is_q4 ? src_mv->col : src_mv->col * 2 };
+ MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+ high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
+}
+#endif // CONFIG_VP9_HIGHBITDEPTH
+
+ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const MV *src_mv,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ enum mv_precision precision,
+ int x, int y) {
+ const int is_q4 = precision == MV_PRECISION_Q4;
+ const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
+ is_q4 ? src_mv->col : src_mv->col * 2 };
+ MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+ const int subpel_x = mv.col & SUBPEL_MASK;
+ const int subpel_y = mv.row & SUBPEL_MASK;
+
+ src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
+
+ inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
+ sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
+ }
+
static INLINE int round_mv_comp_q4(int value) {
return (value < 0 ? value - 2 : value + 2) / 4;
}
return clamped_mv;
}
- static MV average_split_mvs(const struct macroblockd_plane *pd,
-MV average_split_mvs(const struct macroblockd_plane *pd, int plane,
-- const MODE_INFO *mi, int ref, int block) {
++MV average_split_mvs(const struct macroblockd_plane *pd,
++ const MODE_INFO *mi, int ref, int block) {
const int ss_idx = ((pd->subsampling_x > 0) << 1) | (pd->subsampling_y > 0);
MV res = {0, 0};
switch (ss_idx) {
extern "C" {
#endif
-MV average_split_mvs(const struct macroblockd_plane *pd, int plane,
- const MODE_INFO *mi, int ref, int block);
+ void inter_predictor(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ const int subpel_x,
+ const int subpel_y,
+ const struct scale_factors *sf,
+ int w, int h, int ref,
+ const InterpKernel *kernel,
+ int xs, int ys);
+
++#if CONFIG_VP9_HIGHBITDEPTH
++void high_inter_predictor(const uint8_t *src, int src_stride,
++ uint8_t *dst, int dst_stride,
++ const int subpel_x,
++ const int subpel_y,
++ const struct scale_factors *sf,
++ int w, int h, int ref,
++ const InterpKernel *kernel,
++ int xs, int ys, int bd);
++#endif // CONFIG_VP9_HIGHBITDEPTH
++
++MV average_split_mvs(const struct macroblockd_plane *pd, const MODE_INFO *mi,
++ int ref, int block);
+
+ MV clamp_mv_to_umv_border_sb(const MACROBLOCKD *xd, const MV *src_mv,
+ int bw, int bh, int ss_x, int ss_y);
+
+ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
+ int bw, int bh,
+ int x, int y, int w, int h,
+ int mi_x, int mi_y);
+
void vp9_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
++#include "vp9/common/vp9_loopfilter_thread.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
- return &xd->mi[0]->mbmi;
-}
-
-static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
- int idx, int mi_row, int mi_col) {
- MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
- RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
- xd->block_refs[idx] = ref_buffer;
-
- if (!vp9_is_valid_scale(&ref_buffer->sf))
- vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
- "Invalid scale factors");
- vp9_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col,
- &ref_buffer->sf);
- if (!cm->frame_parallel_decode)
- xd->corrupted |= ref_buffer->buf->corrupted;
+ return &xd->mi[0].mbmi;
}
- static void decode_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
+ static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col,
vp9_reader *r, BLOCK_SIZE bsize) {
vp9_foreach_transformed_block(xd, bsize,
predict_and_reconstruct_intra_block, &arg);
} else {
- // Setup
- set_ref(cm, xd, 0, mi_row, mi_col);
- if (has_second_ref(mbmi))
- set_ref(cm, xd, 1, mi_row, mi_col);
-
// Prediction
- vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+ vp9_dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);
// Reconstruction
if (!mbmi->skip) {
subsize = get_subsize(bsize, partition);
uv_subsize = ss_size_lookup[subsize][cm->subsampling_x][cm->subsampling_y];
if (subsize >= BLOCK_8X8 && uv_subsize == BLOCK_INVALID)
- vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Invalid block size.");
+ vpx_internal_error(xd->error_info,
+ VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
if (subsize < BLOCK_8X8) {
- decode_block(cm, xd, tile, mi_row, mi_col, r, subsize);
+ decode_block(pbi, xd, tile, mi_row, mi_col, r, subsize);
} else {
switch (partition) {
case PARTITION_NONE:
cm->display_width = cm->width;
cm->display_height = cm->height;
if (vp9_rb_read_bit(rb))
- read_frame_size(rb, &cm->display_width, &cm->display_height);
+ vp9_read_frame_size(rb, &cm->display_width, &cm->display_height);
}
-static void apply_frame_size(VP9_COMMON *cm, int width, int height) {
- BufferPool *const pool = cm->buffer_pool;
+static void resize_mv_buffer(VP9_COMMON *cm) {
+ vpx_free(cm->cur_frame->mvs);
+ cm->cur_frame->mi_rows = cm->mi_rows;
+ cm->cur_frame->mi_cols = cm->mi_cols;
+ cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
+ sizeof(*cm->cur_frame->mvs));
+}
+
+static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
+#if CONFIG_SIZE_LIMIT
+ if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Width and height beyond allowed size.");
+#endif
if (cm->width != width || cm->height != height) {
- // Change in frame size.
- // TODO(agrange) Don't test width/height, check overall size.
- if (width > cm->width || height > cm->height) {
- // Rescale frame buffers only if they're not big enough already.
- if (vp9_resize_frame_buffers(cm, width, height))
+ const int new_mi_rows =
+ ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+ const int new_mi_cols =
+ ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
+
+ // Allocations in vp9_alloc_context_buffers() depend on individual
+ // dimensions as well as the overall size.
+ if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
+ if (vp9_alloc_context_buffers(cm, width, height))
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate frame buffers");
+ "Failed to allocate context buffers");
+ } else {
+ vp9_set_mb_mi(cm, width, height);
}
-
+ vp9_init_context_buffers(cm);
cm->width = width;
cm->height = height;
-
- vp9_update_frame_size(cm);
}
+ if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
+ cm->mi_cols > cm->cur_frame->mi_cols) {
+ resize_mv_buffer(cm);
+ }
+}
+
+static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
+ int width, height;
++ BufferPool *const pool = cm->buffer_pool;
+ vp9_read_frame_size(rb, &width, &height);
+ resize_context_buffers(cm, width, height);
+ setup_display_size(cm, rb);
+ lock_buffer_pool(pool);
if (vp9_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y, VP9_DEC_BORDER_IN_PIXELS,
+ cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_DEC_BORDER_IN_PIXELS,
+ cm->byte_alignment,
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
- cm->cb_priv)) {
+ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
+ pool->cb_priv)) {
+ unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
- cm->frame_bufs[cm->new_fb_idx].buf.color_space =
- (vpx_color_space_t)cm->color_space;
- cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
+ unlock_buffer_pool(pool);
++
++ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
++ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
++ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
}
-static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
- int width, height;
- read_frame_size(rb, &width, &height);
- apply_frame_size(cm, width, height);
- setup_display_size(cm, rb);
+static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
+ int ref_xss, int ref_yss,
+ vpx_bit_depth_t this_bit_depth,
+ int this_xss, int this_yss) {
+ return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
+ ref_yss == this_yss;
}
static void setup_frame_size_with_refs(VP9_COMMON *cm,
struct vp9_read_bit_buffer *rb) {
int width, height;
int found = 0, i;
+ int has_valid_ref_frame = 0;
++ BufferPool *const pool = cm->buffer_pool;
for (i = 0; i < REFS_PER_FRAME; ++i) {
if (vp9_rb_read_bit(rb)) {
YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
}
if (!found)
- read_frame_size(rb, &width, &height);
+ vp9_read_frame_size(rb, &width, &height);
+
+ if (width <= 0 || height <= 0)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Invalid frame size");
- // Check that each of the frames that this frame references has valid
- // dimensions.
+ // Check to make sure at least one of the frames that this frame references
+ // has valid dimensions.
for (i = 0; i < REFS_PER_FRAME; ++i) {
RefBuffer *const ref_frame = &cm->frame_refs[i];
- const int ref_width = ref_frame->buf->y_width;
- const int ref_height = ref_frame->buf->y_height;
-
- if (!valid_ref_frame_size(ref_width, ref_height, width, height))
+ has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
+ ref_frame->buf->y_crop_height,
+ width, height);
+ }
+ if (!has_valid_ref_frame)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Referenced frame has invalid size");
+ for (i = 0; i < REFS_PER_FRAME; ++i) {
+ RefBuffer *const ref_frame = &cm->frame_refs[i];
+ if (!valid_ref_frame_img_fmt(
+ ref_frame->buf->bit_depth,
+ ref_frame->buf->subsampling_x,
+ ref_frame->buf->subsampling_y,
+ cm->bit_depth,
+ cm->subsampling_x,
+ cm->subsampling_y))
vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
- "Referenced frame has invalid size");
+ "Referenced frame has incompatible color format");
}
- apply_frame_size(cm, width, height);
+ resize_context_buffers(cm, width, height);
setup_display_size(cm, rb);
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
- cm->cb_priv)) {
+
++ lock_buffer_pool(pool);
+ if (vp9_realloc_frame_buffer(
+ get_frame_new_buffer(cm), cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_DEC_BORDER_IN_PIXELS,
+ cm->byte_alignment,
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
- cm->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
- cm->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
++ &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
++ pool->cb_priv)) {
++ unlock_buffer_pool(pool);
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate frame buffer");
+ }
++ unlock_buffer_pool(pool);
++
++ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
++ pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
++ pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
}
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
vp9_zero(tile_data->xd.left_seg_context);
for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
mi_col += MI_BLOCK_SIZE) {
- decode_partition(tile_data->cm, &tile_data->xd, &tile, mi_row, mi_col,
+ decode_partition(pbi, &tile_data->xd, &tile, mi_row, mi_col,
&tile_data->bit_reader, BLOCK_64X64);
}
+ pbi->mb.corrupted |= tile_data->xd.corrupted;
+ if (pbi->mb.corrupted)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Failed to decode tile data");
}
// Loopfilter one row.
if (cm->lf.filter_level) {
if (cm->show_existing_frame) {
// Show an existing frame directly.
const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
-
- if (frame_to_show < 0 || cm->frame_bufs[frame_to_show].ref_count < 1)
+ lock_buffer_pool(pool);
- if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1)
++ if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
++ unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
"Buffer %d does not contain a decoded frame",
frame_to_show);
++ }
- ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show);
+ ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
+ unlock_buffer_pool(pool);
pbi->refresh_frame_flags = 0;
cm->lf.filter_level = 0;
cm->show_frame = 1;
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
setup_frame_size(cm, rb);
- pbi->need_resync = 0;
- } else {
+ if (pbi->need_resync) {
+ vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
+ pbi->need_resync = 0;
+ }
- } else {
++ } else if (pbi->need_resync != 1) { /* Skip if need resync */
pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
for (i = 0; i < REFS_PER_FRAME; ++i) {
const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
const int idx = cm->ref_frame_map[ref];
RefBuffer *const ref_frame = &cm->frame_refs[i];
ref_frame->idx = idx;
- ref_frame->buf = &cm->frame_bufs[idx].buf;
+ ref_frame->buf = &frame_bufs[idx].buf;
cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
}
+
setup_frame_size_with_refs(cm, rb);
cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
VP9_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct vp9_read_bit_buffer rb = { NULL, NULL, 0, NULL, 0};
-
+ int context_updated = 0;
-
uint8_t clear_data[MAX_VP9_HEADER_SIZE];
const size_t first_partition_size = read_uncompressed_header(pbi,
init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
xd->corrupted = 0;
new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
+ if (new_fb->corrupted)
+ vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data header is corrupted.");
- cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+ if (cm->lf.filter_level) {
+ vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
+ }
+
+ // If encoded in frame parallel mode, frame context is ready after decoding
+ // the frame header.
+ if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ FrameWorkerData *const frame_worker_data = worker->data1;
+ if (cm->refresh_frame_context) {
+ context_updated = 1;
++ cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+ }
+ vp9_frameworker_lock_stats(worker);
+ pbi->cur_buf->row = -1;
+ pbi->cur_buf->col = -1;
+ frame_worker_data->frame_context_ready = 1;
+ // Signal the main thread that context is ready.
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ }
+
// TODO(jzern): remove frame_parallel_decoding_mode restriction for
// single-frame tile decoding.
if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
"Decode failed. Frame data is corrupted.");
}
- if (cm->refresh_frame_context)
+ // In non-frame-parallel mode, update the frame context here.
+ if (cm->refresh_frame_context && !context_updated)
- cm->frame_contexts[cm->frame_context_idx] = cm->fc;
+ cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}
- const MODE_INFO *mi = xd->mi[0];
+
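+ // Copy a b_w x b_h block whose source area may extend outside the w x h
+ // reference frame, replicating the frame's edge pixels into the
+ // out-of-frame portion.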
+ static void build_mc_border(const uint8_t *src, int src_stride,
+ uint8_t *dst, int dst_stride,
+ int x, int y, int b_w, int b_h, int w, int h) {
+ // Get a pointer to the start of the real data for this row.
+ const uint8_t *ref_row = src - x - y * src_stride;
+
+ if (y >= h)
+ ref_row += (h - 1) * src_stride;
+ else if (y > 0)
+ ref_row += y * src_stride;
+
+ do {
+ int right = 0, copy;
+ int left = x < 0 ? -x : 0;
+
+ if (left > b_w)
+ left = b_w;
+
+ if (x + b_w > w)
+ right = x + b_w - w;
+
+ if (right > b_w)
+ right = b_w;
+
+ copy = b_w - left - right;
+
+ if (left)
+ memset(dst, ref_row[0], left);
+
+ if (copy)
+ memcpy(dst + left, ref_row + x + left, copy);
+
+ if (right)
+ memset(dst + left + copy, ref_row[w - 1], right);
+
+ dst += dst_stride;
+ ++y;
+
+ if (y > 0 && y < h)
+ ref_row += src_stride;
+ } while (--b_h);
+ }
+
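++// High bit-depth variant of build_mc_border() operating on 16-bit samples.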
++#if CONFIG_VP9_HIGHBITDEPTH
++static void high_build_mc_border(const uint8_t *src8, int src_stride,
++ uint16_t *dst, int dst_stride,
++ int x, int y, int b_w, int b_h,
++ int w, int h) {
++ // Get a pointer to the start of the real data for this row.
++ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
++ const uint16_t *ref_row = src - x - y * src_stride;
++
++ if (y >= h)
++ ref_row += (h - 1) * src_stride;
++ else if (y > 0)
++ ref_row += y * src_stride;
++
++ do {
++ int right = 0, copy;
++ int left = x < 0 ? -x : 0;
++
++ if (left > b_w)
++ left = b_w;
++
++ if (x + b_w > w)
++ right = x + b_w - w;
++
++ if (right > b_w)
++ right = b_w;
++
++ copy = b_w - left - right;
++
++ if (left)
++ vpx_memset16(dst, ref_row[0], left);
++
++ if (copy)
++ memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));
++
++ if (right)
++ vpx_memset16(dst + left + copy, ref_row[w - 1], right);
++
++ dst += dst_stride;
++ ++y;
++
++ if (y > 0 && y < h)
++ ref_row += src_stride;
++ } while (--b_h);
++}
++#endif // CONFIG_VP9_HIGHBITDEPTH
++
+ void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
+ int plane, int block, int bw, int bh, int x,
+ int y, int w, int h, int mi_x, int mi_y) {
+ struct macroblockd_plane *const pd = &xd->plane[plane];
- ? average_split_mvs(pd, plane, mi, ref, block)
++ const MODE_INFO *mi = xd->mi[0].src_mi;
+ const int is_compound = has_second_ref(&mi->mbmi);
+ const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
+ int ref;
+
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
+ const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
+ struct buf_2d *const pre_buf = &pd->pre[ref];
+ struct buf_2d *const dst_buf = &pd->dst;
+ uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
+ const MV mv = mi->mbmi.sb_type < BLOCK_8X8
- if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width ||
++ ? average_split_mvs(pd, mi, ref, block)
+ : mi->mbmi.mv[ref].as_mv;
+
+ // TODO(jkoleszar): This clamping is done in the incorrect place for the
+ // scaling case. It needs to be done on the scaled MV, not the pre-scaling
+ // MV. Note however that it performs the subsampling aware scaling so
+ // that the result is always q4.
+ // The mv precision here is MV_PRECISION_Q4.
+ const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, &mv, bw, bh,
+ pd->subsampling_x,
+ pd->subsampling_y);
+
+ MV32 scaled_mv;
+ int xs, ys, x0, y0, x0_16, y0_16, y1, frame_width, frame_height,
+ buf_stride, subpel_x, subpel_y;
+ uint8_t *ref_frame, *buf_ptr;
+ const int idx = xd->block_refs[ref]->idx;
+ BufferPool *const pool = pbi->common.buffer_pool;
+ RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
+
+ // Get reference frame pointer, width and height.
+ if (plane == 0) {
+ frame_width = ref_frame_buf->buf.y_crop_width;
+ frame_height = ref_frame_buf->buf.y_crop_height;
+ ref_frame = ref_frame_buf->buf.y_buffer;
+ } else {
+ frame_width = ref_frame_buf->buf.uv_crop_width;
+ frame_height = ref_frame_buf->buf.uv_crop_height;
+ ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
+ : ref_frame_buf->buf.v_buffer;
+ }
+
+ if (vp9_is_scaled(sf)) {
+ // Co-ordinate of containing block to pixel precision.
+ int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
+ int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
+
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = (x_start + x) << SUBPEL_BITS;
+ y0_16 = (y_start + y) << SUBPEL_BITS;
+
+ // Co-ordinate of current block in reference frame
+ // to 1/16th pixel precision.
+ x0_16 = sf->scale_value_x(x0_16, sf);
+ y0_16 = sf->scale_value_y(y0_16, sf);
+
+ // Map the top left corner of the block into the reference frame.
+ x0 = sf->scale_value_x(x_start + x, sf);
+ y0 = sf->scale_value_y(y_start + y, sf);
+
+ // Scale the MV and incorporate the sub-pixel offset of the block
+ // in the reference frame.
+ scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+ xs = sf->x_step_q4;
+ ys = sf->y_step_q4;
+ } else {
+ // Co-ordinate of containing block to pixel precision.
+ x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
+ y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;
+
+ // Co-ordinate of the block to 1/16th pixel precision.
+ x0_16 = x0 << SUBPEL_BITS;
+ y0_16 = y0 << SUBPEL_BITS;
+
+ scaled_mv.row = mv_q4.row;
+ scaled_mv.col = mv_q4.col;
+ xs = ys = 16;
+ }
+ subpel_x = scaled_mv.col & SUBPEL_MASK;
+ subpel_y = scaled_mv.row & SUBPEL_MASK;
+
+ // Calculate the top left corner of the best matching block in the
+ // reference frame.
+ x0 += scaled_mv.col >> SUBPEL_BITS;
+ y0 += scaled_mv.row >> SUBPEL_BITS;
+ x0_16 += scaled_mv.col;
+ y0_16 += scaled_mv.row;
+
+ // Get reference block pointer.
+ buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
+ buf_stride = pre_buf->stride;
+
+ // Get reference block bottom right vertical coordinate.
+ y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;
+
+ // Do border extension if there is motion or the
+ // width/height is not a multiple of 8 pixels.
+ if (scaled_mv.col || scaled_mv.row ||
+ (frame_width & 0x7) || (frame_height & 0x7)) {
+ int x_pad = 0, y_pad = 0;
+
+ // Get reference block bottom right horizontal coordinate.
+ int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
+
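+ // The sub-pel interpolation filters read pixels beyond the block, so grow
+ // the area to fetch by up to VP9_INTERP_EXTEND pixels on each side whenever
+ // sub-pel filtering is needed.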
+ if (subpel_x || (sf->x_step_q4 & SUBPEL_MASK)) {
+ x0 -= VP9_INTERP_EXTEND - 1;
+ x1 += VP9_INTERP_EXTEND;
+ x_pad = 1;
+ }
+
+ if (subpel_y || (sf->y_step_q4 & SUBPEL_MASK)) {
+ y0 -= VP9_INTERP_EXTEND - 1;
+ y1 += VP9_INTERP_EXTEND;
+ y_pad = 1;
+ }
+
+ // Wait until the reference block is ready. Pad 7 more pixels because the
+ // last 7 pixels of each superblock row can be changed by the next
+ // superblock row.
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ (y1 + 7) << (plane == 0 ? 0 : 1));
+
+ // Skip border extension if block is inside the frame.
- build_mc_border(buf_ptr1, pre_buf->stride, xd->mc_buf, x1 - x0 + 1,
- x0, y0, x1 - x0 + 1, y1 - y0 + 1, frame_width,
++ if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
+ y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
+ uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
+ // Extend the border.
-
++#if CONFIG_VP9_HIGHBITDEPTH
++ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
++ high_build_mc_border(buf_ptr1,
++ pre_buf->stride,
++ xd->mc_buf_high,
++ x1 - x0 + 1,
++ x0,
++ y0,
++ x1 - x0 + 1,
++ y1 - y0 + 1,
++ frame_width,
++ frame_height);
++ buf_stride = x1 - x0 + 1;
++ buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
++ y_pad * 3 * buf_stride + x_pad * 3;
++ } else {
++ build_mc_border(buf_ptr1,
++ pre_buf->stride,
++ xd->mc_buf,
++ x1 - x0 + 1,
++ x0,
++ y0,
++ x1 - x0 + 1,
++ y1 - y0 + 1,
++ frame_width,
++ frame_height);
++ buf_stride = x1 - x0 + 1;
++ buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
++ }
++#else
++ build_mc_border(buf_ptr1,
++ pre_buf->stride,
++ xd->mc_buf,
++ x1 - x0 + 1,
++ x0,
++ y0,
++ x1 - x0 + 1,
++ y1 - y0 + 1,
++ frame_width,
+ frame_height);
+ buf_stride = x1 - x0 + 1;
+ buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
++#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ } else {
+ // Wait until the reference block is ready. Pad 7 more pixels because the
+ // last 7 pixels of each superblock row can be changed by the next
+ // superblock row.
+ if (pbi->frame_parallel_decode)
+ vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+ (y1 + 7) << (plane == 0 ? 0 : 1));
+ }
- if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
++#if CONFIG_VP9_HIGHBITDEPTH
++ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
++ high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
++ subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
++ } else {
++ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
++ subpel_y, sf, w, h, ref, kernel, xs, ys);
++ }
++#else
+ inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
+ subpel_y, sf, w, h, ref, kernel, xs, ys);
++#endif // CONFIG_VP9_HIGHBITDEPTH
+ }
+ }
+
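+ // Build the inter predictors for every plane of the block. Sub-8x8 blocks
+ // are predicted 4x4 at a time so that each sub-block can use its own
+ // motion vector.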
+ void vp9_dec_build_inter_predictors_sb(VP9Decoder *const pbi, MACROBLOCKD *xd,
+ int mi_row, int mi_col,
+ BLOCK_SIZE bsize) {
+ int plane;
+ const int mi_x = mi_col * MI_SIZE;
+ const int mi_y = mi_row * MI_SIZE;
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
+ &xd->plane[plane]);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const int bw = 4 * num_4x4_w;
+ const int bh = 4 * num_4x4_h;
+
++ if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
+ int i = 0, x, y;
+ assert(bsize == BLOCK_8X8);
+ for (y = 0; y < num_4x4_h; ++y)
+ for (x = 0; x < num_4x4_w; ++x)
+ dec_build_inter_predictors(pbi, xd, plane, i++, bw, bh,
+ 4 * x, 4 * y, 4, 4, mi_x, mi_y);
+ } else {
+ dec_build_inter_predictors(pbi, xd, plane, 0, bw, bh,
+ 0, 0, bw, bh, mi_x, mi_y);
+ }
+ }
+ }
const uint8_t *data, const uint8_t *data_end,
const uint8_t **p_data_end);
+int vp9_read_sync_code(struct vp9_read_bit_buffer *const rb);
+void vp9_read_frame_size(struct vp9_read_bit_buffer *rb,
+ int *width, int *height);
+BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb);
+
+ void vp9_dec_build_inter_predictors_sb(struct VP9Decoder *const pbi,
+ MACROBLOCKD *xd, int mi_row, int mi_col,
+ BLOCK_SIZE bsize);
#ifdef __cplusplus
} // extern "C"
#endif
for (y = 0; y < ymis; y++)
for (x = 0; x < xmis; x++)
- cm->last_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+ cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
+ }
+
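++// When the segmentation map is not updated for this frame, carry the
++// segment ids over from the previous frame's map into the current map.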
++static void copy_segment_id(const VP9_COMMON *cm,
++ const uint8_t *last_segment_ids,
++ uint8_t *current_segment_ids,
++ BLOCK_SIZE bsize, int mi_row, int mi_col) {
++ const int mi_offset = mi_row * cm->mi_cols + mi_col;
++ const int bw = num_8x8_blocks_wide_lookup[bsize];
++ const int bh = num_8x8_blocks_high_lookup[bsize];
++ const int xmis = MIN(cm->mi_cols - mi_col, bw);
++ const int ymis = MIN(cm->mi_rows - mi_row, bh);
++ int x, y;
++
++ for (y = 0; y < ymis; y++)
++ for (x = 0; x < xmis; x++)
++ current_segment_ids[mi_offset + y * cm->mi_cols + x] =
++ last_segment_ids[mi_offset + y * cm->mi_cols + x];
+}
+
static int read_intra_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
int mi_row, int mi_col,
vp9_reader *r) {
if (!seg->enabled)
return 0; // Default for disabled segmentation
-- if (!seg->update_map)
++ if (!seg->update_map) {
++ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
++ bsize, mi_row, mi_col);
return 0;
++ }
segment_id = read_segment_id(r, seg);
set_segment_id(cm, bsize, mi_row, mi_col, segment_id);
predicted_segment_id = vp9_get_segment_id(cm, cm->last_frame_seg_map,
bsize, mi_row, mi_col);
- if (!seg->update_map)
+ if (!seg->update_map) {
- set_segment_id(cm, bsize, mi_row, mi_col, predicted_segment_id);
++ copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
++ bsize, mi_row, mi_col);
return predicted_segment_id;
+ }
if (seg->temporal_update) {
const vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
for (ref = 0; ref < 1 + is_compound; ++ref) {
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+ RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+ xd->block_refs[ref] = ref_buf;
+ if ((!vp9_is_valid_scale(&ref_buf->sf)))
+ vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
+ "Reference frame has invalid dimensions");
+ vp9_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col,
+ &ref_buf->sf);
vp9_find_mv_refs(cm, xd, tile, mi, frame, mbmi->ref_mvs[frame],
- mi_row, mi_col);
+ mi_row, mi_col, fpm_sync, (void *)pbi);
}
inter_mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
}
}
- static void read_inter_frame_mode_info(VP9_COMMON *const cm,
-// TODO(hkuang): Pass cm instead of pbi. This requires change in
-// vp9_frameworker_wait.
+ static void read_inter_frame_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
- MODE_INFO *const mi = xd->mi[0];
+ VP9_COMMON *const cm = &pbi->common;
+ MODE_INFO *const mi = xd->mi[0].src_mi;
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r);
- mbmi->tx_size = read_tx_size(cm, xd, cm->tx_mode, mbmi->sb_type,
- !mbmi->skip || !inter_block, r);
+ mbmi->tx_size = read_tx_size(cm, xd, !mbmi->skip || !inter_block, r);
if (inter_block)
- read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
+ read_inter_block_mode_info(pbi, xd, tile, mi, mi_row, mi_col, r);
else
read_intra_block_mode_info(cm, mi, r);
}
- void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
+ void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
+ VP9_COMMON *const cm = &pbi->common;
+ MODE_INFO *const mi = xd->mi[0].src_mi;
+ const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
+ const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
+ const int x_mis = MIN(bw, cm->mi_cols - mi_col);
+ const int y_mis = MIN(bh, cm->mi_rows - mi_row);
+ MV_REF* frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
+ int w, h;
+
if (frame_is_intra_only(cm))
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
else
- read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
+ read_inter_frame_mode_info(pbi, xd, tile, mi_row, mi_col, r);
+
+ for (h = 0; h < y_mis; ++h) {
+ MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
+ for (w = 0; w < x_mis; ++w) {
+ MV_REF *const mv = frame_mv + w;
+ mv->ref_frame[0] = mi->src_mi->mbmi.ref_frame[0];
+ mv->ref_frame[1] = mi->src_mi->mbmi.ref_frame[1];
+ mv->mv[0].as_int = mi->src_mi->mbmi.mv[0].as_int;
+ mv->mv[1].as_int = mi->src_mi->mbmi.mv[1].as_int;
+ }
+ }
}
#include "vp9/common/vp9_postproc.h"
#endif
#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_systemdependent.h"
+ #include "vp9/common/vp9_thread.h"
#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
}
}
- VP9Decoder *vp9_decoder_create() {
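+// Decoder-side mode-info hooks. They are installed on the common struct in
+// vp9_decoder_create() so that shared code can (re)allocate the mi arrays
+// without knowing about the decoder.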
+static void vp9_dec_setup_mi(VP9_COMMON *cm) {
+ cm->mi = cm->mip + cm->mi_stride + 1;
+ vpx_memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
+}
+
+static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) {
+ cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip));
+ if (!cm->mip)
+ return 1;
+ cm->mi_alloc_size = mi_size;
+ return 0;
+}
+
+static void vp9_dec_free_mi(VP9_COMMON *cm) {
+ vpx_free(cm->mip);
+ cm->mip = NULL;
+}
+
- VP9Decoder *const pbi = vpx_memalign(32, sizeof(*pbi));
- VP9_COMMON *const cm = pbi ? &pbi->common : NULL;
+ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
+ VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
+ VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
if (!cm)
return NULL;
cm->current_video_frame = 0;
pbi->ready_for_new_data = 1;
+ pbi->common.buffer_pool = pool;
+
+ cm->bit_depth = VPX_BITS_8;
+ cm->dequant_bit_depth = VPX_BITS_8;
+
+ cm->alloc_mi = vp9_dec_alloc_mi;
+ cm->free_mi = vp9_dec_free_mi;
+ cm->setup_mi = vp9_dec_setup_mi;
+
// vp9_init_dequantizer() is first called here. Add check in
// frame_init_dequantizer() to avoid unnecessary calling of
// vp9_init_dequantizer() for every frame.
}
void vp9_decoder_remove(VP9Decoder *pbi) {
-- VP9_COMMON *const cm = &pbi->common;
int i;
- vp9_remove_common(cm);
vp9_get_worker_interface()->end(&pbi->lf_worker);
vpx_free(pbi->lf_worker.data1);
vpx_free(pbi->tile_data);
for (i = 0; i < pbi->num_tile_workers; ++i) {
VP9Worker *const worker = &pbi->tile_workers[i];
vp9_get_worker_interface()->end(worker);
- vpx_free(worker->data1);
- vpx_free(worker->data2);
}
+ vpx_free(pbi->tile_worker_data);
+ vpx_free(pbi->tile_worker_info);
vpx_free(pbi->tile_workers);
- if (pbi->num_tile_workers) {
- const int sb_rows =
- mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
- vp9_loop_filter_dealloc(&pbi->lf_row_sync, sb_rows);
+ if (pbi->num_tile_workers > 0) {
+ vp9_loop_filter_dealloc(&pbi->lf_row_sync);
}
- vp9_remove_common(cm);
vpx_free(pbi);
}
int vp9_receive_compressed_data(VP9Decoder *pbi,
size_t size, const uint8_t **psource) {
- VP9_COMMON *const cm = &pbi->common;
+ VP9_COMMON *volatile const cm = &pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
const uint8_t *source = *psource;
int retcode = 0;
+
cm->error.error_code = VPX_CODEC_OK;
if (size == 0) {
pbi->ready_for_new_data = 0;
// Check if the previous frame was a frame without any references to it.
- if (cm->new_fb_idx >= 0 && cm->frame_bufs[cm->new_fb_idx].ref_count == 0)
- cm->release_fb_cb(cm->cb_priv,
- &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer);
+ // Release frame buffer if not decoding in frame parallel mode.
+ if (!pbi->frame_parallel_decode && cm->new_fb_idx >= 0
+ && frame_bufs[cm->new_fb_idx].ref_count == 0)
+ pool->release_fb_cb(pool->cb_priv,
+ &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
cm->new_fb_idx = get_free_fb(cm);
- cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
+ // Assign a MV array to the frame buffer.
++ cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
++
+ pbi->hold_ref_buf = 0;
+ if (pbi->frame_parallel_decode) {
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ vp9_frameworker_lock_stats(worker);
+ frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
+ // Reset decoding progress.
+ pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
+ pbi->cur_buf->row = -1;
+ pbi->cur_buf->col = -1;
+ vp9_frameworker_unlock_stats(worker);
+ } else {
+ pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
+ }
+
if (setjmp(cm->error.jmp)) {
- pbi->need_resync = 1;
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
++ VP9_COMMON *const cm = &pbi->common;
+ int i;
+
cm->error.setjmp = 0;
+ pbi->ready_for_new_data = 1;
- vp9_clear_system_state();
-
- if (cm->new_fb_idx > 0 && cm->frame_bufs[cm->new_fb_idx].ref_count > 0)
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
+ // Synchronize all threads immediately as a subsequent decode call may
+ // cause a resize invalidating some allocations.
+ winterface->sync(&pbi->lf_worker);
+ for (i = 0; i < pbi->num_tile_workers; ++i) {
+ winterface->sync(&pbi->tile_workers[i]);
+ }
+
- VP9_COMMON *const cm = &pbi->common;
+ lock_buffer_pool(pool);
+ // Release all the reference buffers if the worker thread is holding them.
+ if (pbi->hold_ref_buf == 1) {
+ int ref_index = 0, mask;
+ BufferPool *const pool = cm->buffer_pool;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ // Current thread releases the holding of reference frame.
+ decrease_ref_count(old_idx, frame_bufs, pool);
+
+ // Release the reference frame in reference map.
+ if ((mask & 1) && old_idx >= 0) {
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ ++ref_index;
+ }
+
+ // Current thread releases the holding of reference frame.
+ for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
+ const int old_idx = cm->ref_frame_map[ref_index];
+ decrease_ref_count(old_idx, frame_bufs, pool);
+ }
+ pbi->hold_ref_buf = 0;
+ }
+ // Release current frame.
+ decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
+ unlock_buffer_pool(pool);
++ vp9_clear_system_state();
return -1;
}
vp9_clear_system_state();
- if (!cm->show_existing_frame)
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+
+ if (!cm->show_existing_frame) {
cm->last_show_frame = cm->show_frame;
+ cm->prev_frame = cm->cur_frame;
+ }
- if (cm->show_frame)
- cm->current_video_frame++;
+ // Update progress in frame parallel decode.
+ if (pbi->frame_parallel_decode) {
+ // Need to lock the mutex here as another thread may
+ // be accessing this buffer.
+ VP9Worker *const worker = pbi->frame_worker_owner;
+ FrameWorkerData *const frame_worker_data = worker->data1;
+ vp9_frameworker_lock_stats(worker);
+
+ if (cm->show_frame) {
- if (!cm->show_existing_frame)
- vp9_swap_mi_and_prev_mi(cm);
+ cm->current_video_frame++;
+ }
+ vp9_swap_current_and_last_seg_map(cm);
+ frame_worker_data->frame_decoded = 1;
+ frame_worker_data->frame_context_ready = 1;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ } else {
+ cm->last_width = cm->width;
+ cm->last_height = cm->height;
+ if (cm->show_frame) {
- if (!cm->show_existing_frame)
- vp9_swap_mi_and_prev_mi(cm);
+ cm->current_video_frame++;
+ }
+
+ vp9_swap_current_and_last_seg_map(cm);
+ }
cm->error.setjmp = 0;
return retcode;
pbi->ready_for_new_data = 1;
/* no raw frame to show!!! */
- if (pbi->common.show_frame == 0)
+ if (!cm->show_frame)
return ret;
++ pbi->ready_for_new_data = 1;
++
#if CONFIG_VP9_POSTPROC
- ret = vp9_post_proc_frame(&pbi->common, sd, flags);
+ if (!cm->show_existing_frame) {
+ ret = vp9_post_proc_frame(cm, sd, flags);
+ } else {
+ *sd = *cm->frame_to_show;
+ ret = 0;
+ }
#else
- *sd = *pbi->common.frame_to_show;
+ *sd = *cm->frame_to_show;
ret = 0;
#endif /*!CONFIG_POSTPROC*/
vp9_clear_system_state();
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_ppflags.h"
#include "vp9/common/vp9_thread.h"
-
-#include "vp9/decoder/vp9_decoder.h"
+ #include "vp9/decoder/vp9_dthread.h"
+#include "vp9/decoder/vp9_reader.h"
#ifdef __cplusplus
extern "C" {
DECLARE_ALIGNED(16, MACROBLOCKD, xd);
} TileData;
- VP9_COMMON *cm;
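+// Per-tile decoding state handed to each tile worker thread.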
+typedef struct TileWorkerData {
++ struct VP9Decoder *pbi;
+ vp9_reader bit_reader;
+ DECLARE_ALIGNED(16, MACROBLOCKD, xd);
+ struct vpx_internal_error_info error_info;
+} TileWorkerData;
+
typedef struct VP9Decoder {
DECLARE_ALIGNED(16, MACROBLOCKD, mb);
int frame_parallel_decode; // frame-based threading.
+ // TODO(hkuang): Combine this with cur_buf in macroblockd as they are
+ // the same.
+ RefCntBuffer *cur_buf; // Current decoding frame buffer.
+ RefCntBuffer *prev_buf; // Previous decoding frame buffer.
+
+ VP9Worker *frame_worker_owner; // frame_worker that owns this pbi.
VP9Worker lf_worker;
VP9Worker *tile_workers;
+ TileWorkerData *tile_worker_data;
+ TileInfo *tile_worker_info;
int num_tile_workers;
TileData *tile_data;
VP9_REFFRAME ref_frame_flag,
YV12_BUFFER_CONFIG *sd);
- struct VP9Decoder *vp9_decoder_create();
-
- void vp9_decoder_remove(struct VP9Decoder *pbi);
-
-int vp9_get_reference_dec(struct VP9Decoder *pbi,
- int index, YV12_BUFFER_CONFIG **fb);
+static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state,
+ const uint8_t *data) {
+ if (decrypt_cb) {
+ uint8_t marker;
+ decrypt_cb(decrypt_state, data, &marker, 1);
+ return marker;
+ }
+ return *data;
+}
+
+// This function is exposed for use in tests, as well as the inlined function
+// "read_marker".
+vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data,
+ size_t data_sz,
+ uint32_t sizes[8], int *count,
+ vpx_decrypt_cb decrypt_cb,
+ void *decrypt_state);
+ struct VP9Decoder *vp9_decoder_create(BufferPool *const pool);
+
+ void vp9_decoder_remove(struct VP9Decoder *pbi);
+
+ static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
+ BufferPool *const pool) {
+ if (idx >= 0) {
+ --frame_bufs[idx].ref_count;
+ // A worker may only get a free framebuffer index when calling get_free_fb,
+ // but the private buffer is not set up until header decoding has finished.
+ // So if an error occurs while decoding the header, the frame_bufs entry
+ // will not have a valid priv buffer.
+ if (frame_bufs[idx].ref_count == 0 &&
+ frame_bufs[idx].raw_frame_buffer.priv) {
+ pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer);
+ }
+ }
+ }
+
#ifdef __cplusplus
} // extern "C"
#endif
--- /dev/null
-
+ /*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+ #include "./vpx_config.h"
-
+ #include "vpx_mem/vpx_mem.h"
-
+ #include "vp9/common/vp9_reconinter.h"
-// #define DEBUG_THREAD
-
-#if CONFIG_MULTITHREAD
-static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
- const int kMaxTryLocks = 4000;
- int locked = 0;
- int i;
-
- for (i = 0; i < kMaxTryLocks; ++i) {
- if (!pthread_mutex_trylock(mutex)) {
- locked = 1;
- break;
- }
- }
-
- if (!locked)
- pthread_mutex_lock(mutex);
-}
-#endif // CONFIG_MULTITHREAD
-
-static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) {
-#if CONFIG_MULTITHREAD
- const int nsync = lf_sync->sync_range;
-
- if (r && !(c & (nsync - 1))) {
- pthread_mutex_t *const mutex = &lf_sync->mutex_[r - 1];
- mutex_lock(mutex);
-
- while (c > lf_sync->cur_sb_col[r - 1] - nsync) {
- pthread_cond_wait(&lf_sync->cond_[r - 1], mutex);
- }
- pthread_mutex_unlock(mutex);
- }
-#else
- (void)lf_sync;
- (void)r;
- (void)c;
-#endif // CONFIG_MULTITHREAD
-}
-
-static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c,
- const int sb_cols) {
-#if CONFIG_MULTITHREAD
- const int nsync = lf_sync->sync_range;
- int cur;
- // Only signal when there are enough filtered SB for next row to run.
- int sig = 1;
-
- if (c < sb_cols - 1) {
- cur = c;
- if (c % nsync)
- sig = 0;
- } else {
- cur = sb_cols + nsync;
- }
-
- if (sig) {
- mutex_lock(&lf_sync->mutex_[r]);
-
- lf_sync->cur_sb_col[r] = cur;
-
- pthread_cond_signal(&lf_sync->cond_[r]);
- pthread_mutex_unlock(&lf_sync->mutex_[r]);
- }
-#else
- (void)lf_sync;
- (void)r;
- (void)c;
- (void)sb_cols;
-#endif // CONFIG_MULTITHREAD
-}
-
-// Implement row loopfiltering for each thread.
-static void loop_filter_rows_mt(const YV12_BUFFER_CONFIG *const frame_buffer,
- VP9_COMMON *const cm,
- struct macroblockd_plane planes[MAX_MB_PLANE],
- int start, int stop, int y_only,
- VP9LfSync *const lf_sync, int num_lf_workers) {
- const int num_planes = y_only ? 1 : MAX_MB_PLANE;
- int r, c; // SB row and col
- const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
-
- for (r = start; r < stop; r += num_lf_workers) {
- const int mi_row = r << MI_BLOCK_SIZE_LOG2;
- MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
-
- for (c = 0; c < sb_cols; ++c) {
- const int mi_col = c << MI_BLOCK_SIZE_LOG2;
- LOOP_FILTER_MASK lfm;
- int plane;
-
- sync_read(lf_sync, r, c);
-
- vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
- vp9_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
-
- for (plane = 0; plane < num_planes; ++plane) {
- vp9_filter_block_plane(cm, &planes[plane], mi_row, &lfm);
- }
-
- sync_write(lf_sync, r, c, sb_cols);
- }
- }
-}
-
-// Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(void *arg1, void *arg2) {
- TileWorkerData *const tile_data = (TileWorkerData*)arg1;
- LFWorkerData *const lf_data = &tile_data->lfdata;
- (void) arg2;
- loop_filter_rows_mt(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
- lf_data->start, lf_data->stop, lf_data->y_only,
- lf_data->lf_sync, lf_data->num_lf_workers);
- return 1;
-}
-
-// VP9 decoder: Implement multi-threaded loopfilter that uses the tile
-// threads.
-void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
- VP9Decoder *pbi, VP9_COMMON *cm,
- int frame_filter_level,
- int y_only) {
- VP9LfSync *const lf_sync = &pbi->lf_row_sync;
- const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
- // Number of superblock rows and cols
- const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
- const int tile_cols = 1 << cm->log2_tile_cols;
- const int num_workers = MIN(pbi->max_threads & ~1, tile_cols);
- int i;
-
- // Allocate memory used in thread synchronization.
- // This always needs to be done even if frame_filter_level is 0.
- if (!cm->current_video_frame || cm->last_height != cm->height) {
- if (cm->last_height != cm->height) {
- const int aligned_last_height =
- ALIGN_POWER_OF_TWO(cm->last_height, MI_SIZE_LOG2);
- const int last_sb_rows =
- mi_cols_aligned_to_sb(aligned_last_height >> MI_SIZE_LOG2) >>
- MI_BLOCK_SIZE_LOG2;
-
- vp9_loop_filter_dealloc(lf_sync, last_sb_rows);
- }
-
- vp9_loop_filter_alloc(cm, lf_sync, sb_rows, cm->width);
- }
-
- if (!frame_filter_level) return;
-
- vp9_loop_filter_frame_init(cm, frame_filter_level);
-
- // Initialize cur_sb_col to -1 for all SB rows.
- vpx_memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);
-
- // Set up loopfilter thread data.
- // The decoder is using num_workers instead of pbi->num_tile_workers
- // because it has been observed that using more threads on the
- // loopfilter, than there are tile columns in the frame will hurt
- // performance on Android. This is because the system will only
- // schedule the tile decode workers on cores equal to the number
- // of tile columns. Then if the decoder tries to use more threads for the
- // loopfilter, it will hurt performance because of contention. If the
- // multithreading code changes in the future then the number of workers
- // used by the loopfilter should be revisited.
- for (i = 0; i < num_workers; ++i) {
- VP9Worker *const worker = &pbi->tile_workers[i];
- TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
- LFWorkerData *const lf_data = &tile_data->lfdata;
-
- worker->hook = (VP9WorkerHook)loop_filter_row_worker;
-
- // Loopfilter data
- lf_data->frame_buffer = frame;
- lf_data->cm = cm;
- vp9_copy(lf_data->planes, pbi->mb.plane);
- lf_data->start = i;
- lf_data->stop = sb_rows;
- lf_data->y_only = y_only; // always do all planes in decoder
-
- lf_data->lf_sync = lf_sync;
- lf_data->num_lf_workers = num_workers;
-
- // Start loopfiltering
- if (i == num_workers - 1) {
- winterface->execute(worker);
- } else {
- winterface->launch(worker);
- }
- }
-
- // Wait till all rows are finished
- for (i = 0; i < num_workers; ++i) {
- winterface->sync(&pbi->tile_workers[i]);
- }
-}
-
-// Set up nsync by width.
-static int get_sync_range(int width) {
- // nsync numbers are picked by testing. For example, for 4k
- // video, using 4 gives best performance.
- if (width < 640)
- return 1;
- else if (width <= 1280)
- return 2;
- else if (width <= 4096)
- return 4;
- else
- return 8;
-}
-
-// Allocate memory for lf row synchronization
-void vp9_loop_filter_alloc(VP9_COMMON *cm, VP9LfSync *lf_sync, int rows,
- int width) {
-#if CONFIG_MULTITHREAD
- int i;
-
- CHECK_MEM_ERROR(cm, lf_sync->mutex_,
- vpx_malloc(sizeof(*lf_sync->mutex_) * rows));
- for (i = 0; i < rows; ++i) {
- pthread_mutex_init(&lf_sync->mutex_[i], NULL);
- }
-
- CHECK_MEM_ERROR(cm, lf_sync->cond_,
- vpx_malloc(sizeof(*lf_sync->cond_) * rows));
- for (i = 0; i < rows; ++i) {
- pthread_cond_init(&lf_sync->cond_[i], NULL);
- }
-#endif // CONFIG_MULTITHREAD
-
- CHECK_MEM_ERROR(cm, lf_sync->cur_sb_col,
- vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));
-
- // Set up nsync.
- lf_sync->sync_range = get_sync_range(width);
-}
-
-// Deallocate lf synchronization related mutex and data
-void vp9_loop_filter_dealloc(VP9LfSync *lf_sync, int rows) {
-#if !CONFIG_MULTITHREAD
- (void)rows;
-#endif // !CONFIG_MULTITHREAD
-
- if (lf_sync != NULL) {
-#if CONFIG_MULTITHREAD
- int i;
-
- if (lf_sync->mutex_ != NULL) {
- for (i = 0; i < rows; ++i) {
- pthread_mutex_destroy(&lf_sync->mutex_[i]);
- }
- vpx_free(lf_sync->mutex_);
- }
- if (lf_sync->cond_ != NULL) {
- for (i = 0; i < rows; ++i) {
- pthread_cond_destroy(&lf_sync->cond_[i]);
- }
- vpx_free(lf_sync->cond_);
- }
-#endif // CONFIG_MULTITHREAD
- vpx_free(lf_sync->cur_sb_col);
- // clear the structure as the source of this call may be a resize in which
- // case this call will be followed by an _alloc() which may fail.
- vp9_zero(*lf_sync);
- }
-}
-
+ #include "vp9/decoder/vp9_dthread.h"
+ #include "vp9/decoder/vp9_decoder.h"
+
- dst_cm->prev_mi_grid_base = src_cm->prev_mi_grid_base;
- dst_cm->prev_mi_grid_visible = src_cm->prev_mi_grid_visible;
+ // TODO(hkuang): Clean up all the #ifdef in this file.
+ void vp9_frameworker_lock_stats(VP9Worker *const worker) {
+ #if CONFIG_MULTITHREAD
+ FrameWorkerData *const worker_data = worker->data1;
+ pthread_mutex_lock(&worker_data->stats_mutex);
+ #else
+ (void)worker;
+ #endif
+ }
+
+ void vp9_frameworker_unlock_stats(VP9Worker *const worker) {
+ #if CONFIG_MULTITHREAD
+ FrameWorkerData *const worker_data = worker->data1;
+ pthread_mutex_unlock(&worker_data->stats_mutex);
+ #else
+ (void)worker;
+ #endif
+ }
+
+ void vp9_frameworker_signal_stats(VP9Worker *const worker) {
+ #if CONFIG_MULTITHREAD
+ FrameWorkerData *const worker_data = worker->data1;
+ // TODO(hkuang): Investigate using broadcast or signal.
+ pthread_cond_signal(&worker_data->stats_cond);
+ #else
+ (void)worker;
+ #endif
+ }
+
+ // TODO(hkuang): Remove worker parameter as it is only used in debug code.
+ void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
+ int row) {
+ #if CONFIG_MULTITHREAD
+ if (!ref_buf)
+ return;
+
+ // Enabling the following line of code produces a harmless tsan error but
+ // gives the best performance.
+ // if (ref_buf->row >= row && ref_buf->buf.corrupted != 1) return;
+
+ {
+ // Find the worker thread that owns the reference frame. If the reference
+ // frame has been fully decoded, it may not have an owner.
+ VP9Worker *const ref_worker = ref_buf->frame_worker_owner;
+ FrameWorkerData *const ref_worker_data =
+ (FrameWorkerData *)ref_worker->data1;
+ const VP9Decoder *const pbi = ref_worker_data->pbi;
+
+ #ifdef DEBUG_THREAD
+ {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ printf("%d %p worker is waiting for %d %p worker (%d) ref %d \r\n",
+ worker_data->worker_id, worker, ref_worker_data->worker_id,
+ ref_buf->frame_worker_owner, row, ref_buf->row);
+ }
+ #endif
+
+ vp9_frameworker_lock_stats(ref_worker);
+ while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
+ ref_buf->buf.corrupted != 1) {
+ pthread_cond_wait(&ref_worker_data->stats_cond,
+ &ref_worker_data->stats_mutex);
+ }
+
+ if (ref_buf->buf.corrupted == 1) {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ vp9_frameworker_unlock_stats(ref_worker);
+ vpx_internal_error(&worker_data->pbi->common.error,
+ VPX_CODEC_CORRUPT_FRAME,
+ "Worker %p failed to decode frame", worker);
+ }
+ vp9_frameworker_unlock_stats(ref_worker);
+ }
+ #else
++ (void)worker;
+ (void)ref_buf;
+ (void)row;
+ #endif // CONFIG_MULTITHREAD
+ }
+
+ void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+ #if CONFIG_MULTITHREAD
+ VP9Worker *worker = buf->frame_worker_owner;
+
+ #ifdef DEBUG_THREAD
+ {
+ FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
+ printf("%d %p worker decode to (%d) \r\n", worker_data->worker_id,
+ buf->frame_worker_owner, row);
+ }
+ #endif
+
+ vp9_frameworker_lock_stats(worker);
+ buf->row = row;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ #else
+ (void)buf;
+ (void)row;
+ #endif // CONFIG_MULTITHREAD
+ }
+
+ void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
+ VP9Worker *const src_worker) {
+ #if CONFIG_MULTITHREAD
+ FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
+ FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
+ VP9_COMMON *const src_cm = &src_worker_data->pbi->common;
+ VP9_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+ int i;
+
+ // Wait until source frame's context is ready.
+ vp9_frameworker_lock_stats(src_worker);
+ while (!src_worker_data->frame_context_ready) {
+ pthread_cond_wait(&src_worker_data->stats_cond,
+ &src_worker_data->stats_mutex);
+ }
+
+ // src worker may have already finished decoding a frame and swapped the mi.
+ // TODO(hkuang): Remove the following code after implementing no-ModeInfo decoding.
+ if (src_worker_data->frame_decoded) {
+ dst_cm->prev_mip = src_cm->prev_mip;
+ dst_cm->prev_mi = src_cm->prev_mi;
- dst_cm->prev_mi_grid_base = src_cm->mi_grid_base;
- dst_cm->prev_mi_grid_visible = src_cm->mi_grid_visible;
+ dst_cm->last_frame_seg_map = src_cm->last_frame_seg_map;
+ } else {
+ dst_cm->prev_mip = src_cm->mip;
+ dst_cm->prev_mi = src_cm->mi;
+ dst_cm->last_frame_seg_map = src_cm->current_frame_seg_map;
+ }
+ dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
+ vp9_frameworker_unlock_stats(src_worker);
+
+ dst_worker_data->pbi->prev_buf =
+ src_worker_data->pbi->common.show_existing_frame ?
+ NULL : src_worker_data->pbi->cur_buf;
+
+ dst_cm->last_width = !src_cm->show_existing_frame ?
+ src_cm->width : src_cm->last_width;
+ dst_cm->last_height = !src_cm->show_existing_frame ?
+ src_cm->height : src_cm->last_height;
+ dst_cm->display_width = src_cm->display_width;
+ dst_cm->display_height = src_cm->display_height;
+ dst_cm->subsampling_x = src_cm->subsampling_x;
+ dst_cm->subsampling_y = src_cm->subsampling_y;
+ dst_cm->last_show_frame = !src_cm->show_existing_frame ?
+ src_cm->show_frame : src_cm->last_show_frame;
+ dst_cm->last_frame_type = src_cm->last_frame_type;
+ dst_cm->frame_type = src_cm->frame_type;
+ dst_cm->y_dc_delta_q = src_cm->y_dc_delta_q;
+ dst_cm->uv_dc_delta_q = src_cm->uv_dc_delta_q;
+ dst_cm->uv_ac_delta_q = src_cm->uv_ac_delta_q;
+ dst_cm->base_qindex = src_cm->base_qindex;
+
+ for (i = 0; i < REF_FRAMES; ++i)
+ dst_cm->ref_frame_map[i] = src_cm->next_ref_frame_map[i];
+
+ memcpy(dst_cm->lf_info.lfthr, src_cm->lf_info.lfthr,
+ (MAX_LOOP_FILTER + 1) * sizeof(loop_filter_thresh));
+ dst_cm->lf.last_sharpness_level = src_cm->lf.sharpness_level;
+ dst_cm->lf.filter_level = src_cm->lf.filter_level;
+ memcpy(dst_cm->lf.ref_deltas, src_cm->lf.ref_deltas, MAX_REF_LF_DELTAS);
+ memcpy(dst_cm->lf.mode_deltas, src_cm->lf.mode_deltas, MAX_MODE_LF_DELTAS);
+ dst_cm->seg = src_cm->seg;
+ memcpy(dst_cm->frame_contexts, src_cm->frame_contexts,
+ FRAME_CONTEXTS * sizeof(dst_cm->frame_contexts[0]));
+ #else
+ (void) dst_worker;
+ (void) src_worker;
+ #endif // CONFIG_MULTITHREAD
+ }
--- /dev/null
-#include "vp9/decoder/vp9_reader.h"
+ /*
+ * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+ #ifndef VP9_DECODER_VP9_DTHREAD_H_
+ #define VP9_DECODER_VP9_DTHREAD_H_
+
+ #include "./vpx_config.h"
+ #include "vp9/common/vp9_thread.h"
-typedef struct TileWorkerData {
- struct VP9Decoder *pbi;
- vp9_reader bit_reader;
- DECLARE_ALIGNED(16, struct macroblockd, xd);
-
- // Row-based parallel loopfilter data
- LFWorkerData lfdata;
-} TileWorkerData;
-
-// Loopfilter row synchronization
-typedef struct VP9LfSyncData {
-#if CONFIG_MULTITHREAD
- pthread_mutex_t *mutex_;
- pthread_cond_t *cond_;
-#endif
- // Allocate memory to store the loop-filtered superblock index in each row.
- int *cur_sb_col;
- // The optimal sync_range for different resolution and platform should be
- // determined by testing. Currently, it is chosen to be a power-of-2 number.
- int sync_range;
-} VP9LfSync;
-
++#include "vpx/internal/vpx_codec_internal.h"
+
+ struct VP9Common;
+ struct VP9Decoder;
+
-// Allocate memory for loopfilter row synchronization.
-void vp9_loop_filter_alloc(struct VP9Common *cm, VP9LfSync *lf_sync,
- int rows, int width);
-
-// Deallocate loopfilter synchronization related mutex and data.
-void vp9_loop_filter_dealloc(VP9LfSync *lf_sync, int rows);
-
-// Multi-threaded loopfilter that uses the tile threads.
-void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame,
- struct VP9Decoder *pbi,
- struct VP9Common *cm,
- int frame_filter_level,
- int y_only);
-
+ // WorkerData for the FrameWorker thread. It contains all the information of
+ // the worker and decode structures for decoding a frame.
+ typedef struct FrameWorkerData {
+ struct VP9Decoder *pbi;
+ const uint8_t *data;
+ const uint8_t *data_end;
+ size_t data_size;
+ void *user_priv;
+ int result;
+ int worker_id;
+
+ // scratch_buffer is used in frame parallel mode only.
+ // It is used to make a copy of the compressed data.
+ uint8_t *scratch_buffer;
+ size_t scratch_buffer_size;
+
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_t stats_mutex;
+ pthread_cond_t stats_cond;
+ #endif
+
+ int frame_context_ready; // Current frame's context is ready to read.
+ int frame_decoded; // Finished decoding current frame.
+ } FrameWorkerData;
+
+ void vp9_frameworker_lock_stats(VP9Worker *const worker);
+ void vp9_frameworker_unlock_stats(VP9Worker *const worker);
+ void vp9_frameworker_signal_stats(VP9Worker *const worker);
+
+ // Wait until ref_buf has been decoded up to row, in real pixel units.
+ // Note: the worker may have already finished decoding ref_buf and released
+ // it in order to start decoding the next frame, so check whether the worker
+ // is still decoding ref_buf.
+ void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
+ int row);
+
+ // FrameWorker broadcasts its decoding progress so other workers that are
+ // waiting on it can resume decoding.
+ void vp9_frameworker_broadcast(RefCntBuffer *const buf, int row);
+
+ // Copy necessary decoding context from src worker to dst worker.
+ void vp9_frameworker_copy_context(VP9Worker *const dst_worker,
+ VP9Worker *const src_worker);
+
+ #endif // VP9_DECODER_VP9_DTHREAD_H_
// Delete segmentation map
vpx_free(cpi->segmentation_map);
cpi->segmentation_map = NULL;
-- vpx_free(cm->last_frame_seg_map);
-- cm->last_frame_seg_map = NULL;
vpx_free(cpi->coding_context.last_frame_seg_map_copy);
cpi->coding_context.last_frame_seg_map_copy = NULL;
}
- VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf) {
+ VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
+ BufferPool *const pool) {
- unsigned int i, j;
- VP9_COMP *const cpi = vpx_memalign(32, sizeof(VP9_COMP));
- VP9_COMMON *const cm = cpi != NULL ? &cpi->common : NULL;
+ unsigned int i;
+ VP9_COMP *volatile const cpi = vpx_memalign(32, sizeof(VP9_COMP));
+ VP9_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
if (!cm)
return NULL;
}
cm->error.setjmp = 1;
+ cm->alloc_mi = vp9_enc_alloc_mi;
+ cm->free_mi = vp9_enc_free_mi;
+ cm->setup_mi = vp9_enc_setup_mi;
- vp9_rtcd();
+ CHECK_MEM_ERROR(cm, cm->fc,
+ (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
+ CHECK_MEM_ERROR(cm, cm->frame_contexts,
+ (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS,
+ sizeof(*cm->frame_contexts)));
cpi->use_svc = 0;
+ cpi->common.buffer_pool = pool;
init_config(cpi, oxcf);
- vp9_rc_init(&cpi->oxcf, cpi->pass, &cpi->rc);
+ vp9_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
cm->current_video_frame = 0;
-
- // Set reference frame sign bias for ALTREF frame to 1 (for now)
- cm->ref_frame_sign_bias[ALTREF_FRAME] = 1;
-
- cpi->gold_is_last = 0;
- cpi->alt_is_last = 0;
- cpi->gold_is_alt = 0;
-
- cpi->skippable_frame = 0;
+ cpi->partition_search_skippable_frame = 0;
+ cpi->tile_data = NULL;
// Create the encoder segmentation map and set all entries to 0
CHECK_MEM_ERROR(cm, cpi->segmentation_map,
void vp9_update_reference_frames(VP9_COMP *cpi) {
VP9_COMMON * const cm = &cpi->common;
- RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
++ BufferPool *const pool = cm->buffer_pool;
// At this point the new frame has been encoded.
// If any buffer copy / swapping is signaled it should be done here.
if (cm->frame_type == KEY_FRAME) {
- ref_cnt_fb(cm->frame_bufs,
- ref_cnt_fb(frame_bufs, &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
- ref_cnt_fb(frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
- } else if (!cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
- cpi->rc.is_src_frame_alt_ref && !cpi->use_svc) {
- /* Preserve the previously existing golden frame and update the frame in
- * the alt ref slot instead. This is highly specific to the current use of
- * alt-ref as a forward reference, and this needs to be generalized as
- * other uses are implemented (like RTC/temporal scaling)
- *
- * The update to the buffer in the alt ref slot was signaled in
- * vp9_pack_bitstream(), now swap the buffer pointers so that it's treated
- * as the golden frame next time.
- */
++ ref_cnt_fb(pool->frame_bufs,
+ &cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
- ref_cnt_fb(cm->frame_bufs,
++ ref_cnt_fb(pool->frame_bufs,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
+ } else if (vp9_preserve_existing_gf(cpi)) {
+ // We have decided to preserve the previously existing golden frame as our
+ // new ARF frame. However, in the short term in function
+ // vp9_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
+ // we're updating the GF with the current decoded frame, we save it to the
+ // ARF slot instead.
+ // We now have to update the ARF with the current frame and swap gld_fb_idx
+ // and alt_fb_idx so that, overall, we've stored the old GF in the new ARF
+ // slot and, if we're updating the GF, the current frame becomes the new GF.
int tmp;
- ref_cnt_fb(cm->frame_bufs,
- ref_cnt_fb(frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
++ ref_cnt_fb(pool->frame_bufs,
+ &cm->ref_frame_map[cpi->alt_fb_idx], cm->new_fb_idx);
tmp = cpi->alt_fb_idx;
cpi->alt_fb_idx = cpi->gld_fb_idx;
arf_idx = gf_group->arf_update_idx[gf_group->index];
}
- ref_cnt_fb(cm->frame_bufs,
- ref_cnt_fb(frame_bufs, &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
++ ref_cnt_fb(pool->frame_bufs,
+ &cm->ref_frame_map[arf_idx], cm->new_fb_idx);
+ vpx_memcpy(cpi->interp_filter_selected[ALTREF_FRAME],
+ cpi->interp_filter_selected[0],
+ sizeof(cpi->interp_filter_selected[0]));
}
if (cpi->refresh_golden_frame) {
- ref_cnt_fb(cm->frame_bufs,
- ref_cnt_fb(frame_bufs,
++ ref_cnt_fb(pool->frame_bufs,
&cm->ref_frame_map[cpi->gld_fb_idx], cm->new_fb_idx);
+ if (!cpi->rc.is_src_frame_alt_ref)
+ vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+ cpi->interp_filter_selected[0],
+ sizeof(cpi->interp_filter_selected[0]));
+ else
+ vpx_memcpy(cpi->interp_filter_selected[GOLDEN_FRAME],
+ cpi->interp_filter_selected[ALTREF_FRAME],
+ sizeof(cpi->interp_filter_selected[ALTREF_FRAME]));
}
}
if (cpi->refresh_last_frame) {
- ref_cnt_fb(cm->frame_bufs,
- ref_cnt_fb(frame_bufs, &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
- }
-#if CONFIG_DENOISING
- vp9_denoiser_update_frame_info(&cpi->denoiser,
- *cpi->Source,
- cpi->common.frame_type,
- cpi->refresh_alt_ref_frame,
- cpi->refresh_golden_frame,
- cpi->refresh_last_frame);
++ ref_cnt_fb(pool->frame_bufs,
+ &cm->ref_frame_map[cpi->lst_fb_idx], cm->new_fb_idx);
+ if (!cpi->rc.is_src_frame_alt_ref)
+ vpx_memcpy(cpi->interp_filter_selected[LAST_FRAME],
+ cpi->interp_filter_selected[0],
+ sizeof(cpi->interp_filter_selected[0]));
+ }
+#if CONFIG_VP9_TEMPORAL_DENOISING
+ if (cpi->oxcf.noise_sensitivity > 0) {
+ vp9_denoiser_update_frame_info(&cpi->denoiser,
+ *cpi->Source,
+ cpi->common.frame_type,
+ cpi->refresh_alt_ref_frame,
+ cpi->refresh_golden_frame,
+ cpi->refresh_last_frame);
+ }
#endif
}
VP9_COMMON *cm = &cpi->common;
MV_REFERENCE_FRAME ref_frame;
const VP9_REFFRAME ref_mask[3] = {VP9_LAST_FLAG, VP9_GOLD_FLAG, VP9_ALT_FLAG};
- RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
- const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
- const YV12_BUFFER_CONFIG *const ref = &frame_bufs[idx].buf;
-
// Need to convert from VP9_REFFRAME to index into ref_mask (subtract 1).
- if ((cpi->ref_frame_flags & ref_mask[ref_frame - 1]) &&
- (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height)) {
- const int new_fb = get_free_fb(cm);
- vp9_realloc_frame_buffer(&frame_bufs[new_fb].buf,
- cm->width, cm->height,
- cm->subsampling_x, cm->subsampling_y,
- VP9_ENC_BORDER_IN_PIXELS, NULL, NULL, NULL);
- scale_and_extend_frame(ref, &frame_bufs[new_fb].buf);
- cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
+ if (cpi->ref_frame_flags & ref_mask[ref_frame - 1]) {
+ const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
- const YV12_BUFFER_CONFIG *const ref = &cm->frame_bufs[idx].buf;
++ BufferPool *const pool = cm->buffer_pool;
++ const YV12_BUFFER_CONFIG *const ref = &pool->frame_bufs[idx].buf;
+
+#if CONFIG_VP9_HIGHBITDEPTH
+ if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
+ const int new_fb = get_free_fb(cm);
- cm->cur_frame = &cm->frame_bufs[new_fb];
- vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
++ cm->cur_frame = &pool->frame_bufs[new_fb];
++ vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ cm->use_highbitdepth,
+ VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ NULL, NULL, NULL);
- scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf,
++ scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf,
+ (int)cm->bit_depth);
+#else
+ if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
+ const int new_fb = get_free_fb(cm);
- vp9_realloc_frame_buffer(&cm->frame_bufs[new_fb].buf,
++ vp9_realloc_frame_buffer(&pool->frame_bufs[new_fb].buf,
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+ VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ NULL, NULL, NULL);
- scale_and_extend_frame(ref, &cm->frame_bufs[new_fb].buf);
++ scale_and_extend_frame(ref, &pool->frame_bufs[new_fb].buf);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
- if (cm->frame_bufs[new_fb].mvs == NULL ||
- cm->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
- cm->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
- vpx_free(cm->frame_bufs[new_fb].mvs);
- cm->frame_bufs[new_fb].mvs =
++ if (pool->frame_bufs[new_fb].mvs == NULL ||
++ pool->frame_bufs[new_fb].mi_rows < cm->mi_rows ||
++ pool->frame_bufs[new_fb].mi_cols < cm->mi_cols) {
++ vpx_free(pool->frame_bufs[new_fb].mvs);
++ pool->frame_bufs[new_fb].mvs =
+ (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->frame_bufs[new_fb].mvs));
- cm->frame_bufs[new_fb].mi_rows = cm->mi_rows;
- cm->frame_bufs[new_fb].mi_cols = cm->mi_cols;
++ sizeof(*pool->frame_bufs[new_fb].mvs));
++ pool->frame_bufs[new_fb].mi_rows = cm->mi_rows;
++ pool->frame_bufs[new_fb].mi_cols = cm->mi_cols;
+ }
+ } else {
+ cpi->scaled_ref_idx[ref_frame - 1] = idx;
- ++cm->frame_bufs[idx].ref_count;
++ ++pool->frame_bufs[idx].ref_count;
+ }
} else {
- cpi->scaled_ref_idx[ref_frame - 1] = idx;
- ++frame_bufs[idx].ref_count;
+ cpi->scaled_ref_idx[ref_frame - 1] = INVALID_REF_BUFFER_IDX;
}
}
}
static void release_scaled_references(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
- RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
-
- for (i = 0; i < 3; ++i)
- --frame_bufs[cpi->scaled_ref_idx[i]].ref_count;
+ for (i = 0; i < MAX_REF_FRAMES; ++i) {
+ const int idx = cpi->scaled_ref_idx[i];
- RefCntBuffer *const buf =
- idx != INVALID_REF_BUFFER_IDX ? &cm->frame_bufs[idx] : NULL;
++ RefCntBuffer *const buf = idx != INVALID_REF_BUFFER_IDX ?
++ &cm->buffer_pool->frame_bufs[idx] : NULL;
+ if (buf != NULL) {
+ --buf->ref_count;
+ cpi->scaled_ref_idx[i] = INVALID_REF_BUFFER_IDX;
+ }
+ }
}
static void full_to_model_count(unsigned int *model_count,
}
#endif
-static void encode_without_recode_loop(VP9_COMP *cpi,
- int q) {
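+// Pick the initial motion search range from the frame size and, when
+// auto step size is enabled, from the largest motion vector magnitude
+// seen in the previous frame.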
+static void set_mv_search_params(VP9_COMP *cpi) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const unsigned int max_mv_def = MIN(cm->width, cm->height);
+
+ // Default based on max resolution.
+ cpi->mv_step_param = vp9_init_search_range(max_mv_def);
+
+ if (cpi->sf.mv.auto_mv_step_size) {
+ if (frame_is_intra_only(cm)) {
+ // Initialize max_mv_magnitude for use in the first INTER frame
+ // after a key/intra-only frame.
+ cpi->max_mv_magnitude = max_mv_def;
+ } else {
+ if (cm->show_frame) {
+ // Allow mv_steps to correspond to twice the max mv magnitude found
+ // in the previous frame, capped by the default max_mv_magnitude based
+ // on resolution.
+ cpi->mv_step_param =
+ vp9_init_search_range(MIN(max_mv_def, 2 * cpi->max_mv_magnitude));
+ }
+ cpi->max_mv_magnitude = 0;
+ }
+ }
+}
+
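+// Configure speed features and RD thresholds that do not depend on the
+// frame dimensions.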
+static void set_size_independent_vars(VP9_COMP *cpi) {
+ vp9_set_speed_features_framesize_independent(cpi);
+ vp9_set_rd_speed_thresholds(cpi);
+ vp9_set_rd_speed_thresholds_sub8x8(cpi);
+ cpi->common.interp_filter = cpi->sf.default_interp_filter;
+}
+
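+// Configure everything that depends on the coded frame size: size-dependent
+// speed features, the quantizer and its bounds, optional static segmentation
+// and pre-encode denoising.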
+static void set_size_dependent_vars(VP9_COMP *cpi, int *q,
+ int *bottom_index, int *top_index) {
VP9_COMMON *const cm = &cpi->common;
- YV12_BUFFER_CONFIG *const buf = &cm->frame_bufs[idx].buf;
+ const VP9EncoderConfig *const oxcf = &cpi->oxcf;
+
+ // Setup variables that depend on the dimensions of the frame.
+ vp9_set_speed_features_framesize_dependent(cpi);
+
+ // Decide q and q bounds.
+ *q = vp9_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+
+ if (!frame_is_intra_only(cm)) {
+ vp9_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+ }
+
+ // Configure experimental use of segmentation for enhanced coding of
+ // static regions if indicated.
+ // Only allowed in the second pass of a two pass encode, as it requires
+ // lagged coding, and if the relevant speed feature flag is set.
+ if (oxcf->pass == 2 && cpi->sf.static_segmentation)
+ configure_static_seg_features(cpi);
+
+#if CONFIG_VP9_POSTPROC
+ if (oxcf->noise_sensitivity > 0) {
+ int l = 0;
+ switch (oxcf->noise_sensitivity) {
+ case 1:
+ l = 20;
+ break;
+ case 2:
+ l = 40;
+ break;
+ case 3:
+ l = 60;
+ break;
+ case 4:
+ case 5:
+ l = 100;
+ break;
+ case 6:
+ l = 150;
+ break;
+ }
+ vp9_denoise(cpi->Source, cpi->Source, l);
+ }
+#endif // CONFIG_VP9_POSTPROC
+}
+
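+// Initialize the motion search site configuration for the stride of the
+// scaled source buffer.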
+static void init_motion_estimation(VP9_COMP *cpi) {
+ int y_stride = cpi->scaled_source.y_stride;
+
+ if (cpi->sf.mv.search_method == NSTEP) {
+ vp9_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+ } else if (cpi->sf.mv.search_method == DIAMOND) {
+ vp9_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+ }
+}
+
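+// Establish the coded frame size: apply any internal resize, reallocate the
+// new frame buffer, and refresh the reference buffer pointers and their
+// scale factors.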
+void set_frame_size(VP9_COMP *cpi) {
+ int ref_frame;
+ VP9_COMMON *const cm = &cpi->common;
+ const VP9EncoderConfig *const oxcf = &cpi->oxcf;
+ MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
+
+ if (oxcf->pass == 2 &&
+ cm->current_video_frame == 0 &&
+ oxcf->resize_mode == RESIZE_FIXED &&
+ oxcf->rc_mode == VPX_VBR) {
+ // Internal scaling is triggered on the first frame.
+ vp9_set_size_literal(cpi, oxcf->scaled_frame_width,
+ oxcf->scaled_frame_height);
+ }
+
+ if ((oxcf->pass == 2) &&
+ (!cpi->use_svc ||
+ (is_two_pass_svc(cpi) &&
+ cpi->svc.encode_empty_frame_state != ENCODING))) {
+ vp9_set_target_rate(cpi);
+ }
+
+ // Reset the frame pointers to the current frame size.
+ vp9_realloc_frame_buffer(get_frame_new_buffer(cm),
+ cm->width, cm->height,
+ cm->subsampling_x, cm->subsampling_y,
+#if CONFIG_VP9_HIGHBITDEPTH
+ cm->use_highbitdepth,
+#endif
+ VP9_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
+ NULL, NULL, NULL);
+
+ alloc_util_frame_buffers(cpi);
+ init_motion_estimation(cpi);
+
+ for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+ const int idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
++ YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[idx].buf;
+ RefBuffer *const ref_buf = &cm->frame_refs[ref_frame - 1];
+ ref_buf->buf = buf;
+ ref_buf->idx = idx;
+#if CONFIG_VP9_HIGHBITDEPTH
+ vp9_setup_scale_factors_for_frame(&ref_buf->sf,
+ buf->y_crop_width, buf->y_crop_height,
+ cm->width, cm->height,
+ (buf->flags & YV12_FLAG_HIGHBITDEPTH) ?
+ 1 : 0);
+#else
+ vp9_setup_scale_factors_for_frame(&ref_buf->sf,
+ buf->y_crop_width, buf->y_crop_height,
+ cm->width, cm->height);
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ if (vp9_is_scaled(&ref_buf->sf))
+ vp9_extend_frame_borders(buf);
+ }
+
+ set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
+}
+
+static void encode_without_recode_loop(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ int q, bottom_index, top_index; // Dummy variables.
+
vp9_clear_system_state();
+
+ set_frame_size(cpi);
+
+ cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source,
+ &cpi->scaled_source);
+
+ if (cpi->unscaled_last_source != NULL)
+ cpi->Last_Source = vp9_scale_if_required(cm, cpi->unscaled_last_source,
+ &cpi->scaled_last_source);
+
+ if (frame_is_intra_only(cm) == 0) {
+ vp9_scale_references(cpi);
+ }
+
+ set_size_independent_vars(cpi);
+ set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
+
vp9_set_quantizer(cm, q);
setup_frame(cpi);
// Variance adaptive and in frame q adjustment experiments are mutually
int vp9_get_compressed_data(VP9_COMP *cpi, unsigned int *frame_flags,
size_t *size, uint8_t *dest,
int64_t *time_stamp, int64_t *time_end, int flush) {
+ const VP9EncoderConfig *const oxcf = &cpi->oxcf;
VP9_COMMON *const cm = &cpi->common;
- MACROBLOCKD *const xd = &cpi->mb.e_mbd;
++ BufferPool *const pool = cm->buffer_pool;
RATE_CONTROL *const rc = &cpi->rc;
struct vpx_usec_timer cmptimer;
YV12_BUFFER_CONFIG *force_src_buffer = NULL;
vp9_restore_layer_context(cpi);
}
- // start with a 0 size frame
- *size = 0;
-
- // Clear down mmx registers
- vp9_clear_system_state();
-
- /* find a free buffer for the new frame, releasing the reference previously
- * held.
- */
- --frame_bufs[cm->new_fb_idx].ref_count;
+ // Find a free buffer for the new frame, releasing the reference previously
+ // held.
- cm->frame_bufs[cm->new_fb_idx].ref_count--;
++ pool->frame_bufs[cm->new_fb_idx].ref_count--;
cm->new_fb_idx = get_free_fb(cm);
- cm->cur_frame = &cm->frame_bufs[cm->new_fb_idx];
++ cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
if (!cpi->use_svc && cpi->multi_arf_allowed) {
if (cm->frame_type == KEY_FRAME) {
int multi_arf_allowed;
int multi_arf_enabled;
+ int multi_arf_last_grp_enabled;
-#if CONFIG_DENOISING
+#if CONFIG_VP9_TEMPORAL_DENOISING
VP9_DENOISER denoiser;
#endif
+
+ // Multi-threading
+ int num_workers;
+ VP9Worker *workers;
+ struct EncWorkerData *tile_thr_data;
+ VP9LfSync lf_row_sync;
} VP9_COMP;
-void vp9_initialize_enc();
+void vp9_initialize_enc(void);
- struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf);
+ struct VP9_COMP *vp9_create_compressor(VP9EncoderConfig *oxcf,
+ BufferPool *const pool);
void vp9_remove_compressor(VP9_COMP *cpi);
void vp9_change_config(VP9_COMP *cpi, const VP9EncoderConfig *oxcf);
// Use either last frame or alt frame for motion search.
if (cpi->ref_frame_flags & VP9_LAST_FLAG) {
- scaled_ref_buf = vp9_get_scaled_ref_frame(cpi, LAST_FRAME);
- ref_frame = LAST_FRAME;
- } else if (cpi->ref_frame_flags & VP9_ALT_FLAG) {
- scaled_ref_buf = vp9_get_scaled_ref_frame(cpi, ALTREF_FRAME);
- ref_frame = ALTREF_FRAME;
+ first_ref_buf = vp9_get_scaled_ref_frame(cpi, LAST_FRAME);
+ if (first_ref_buf == NULL)
+ first_ref_buf = get_ref_frame_buffer(cpi, LAST_FRAME);
}
- if (scaled_ref_buf != NULL) {
- // Update the stride since we are using scaled reference buffer
- first_ref_buf = scaled_ref_buf;
- recon_y_stride = first_ref_buf->y_stride;
- recon_uv_stride = first_ref_buf->uv_stride;
- uv_mb_height = 16 >> (first_ref_buf->y_height > first_ref_buf->uv_height);
+ if (cpi->ref_frame_flags & VP9_GOLD_FLAG) {
++ BufferPool *const pool = cm->buffer_pool;
+ const int ref_idx =
+ cm->ref_frame_map[get_ref_frame_idx(cpi, GOLDEN_FRAME)];
+ const int scaled_idx = cpi->scaled_ref_idx[GOLDEN_FRAME - 1];
+
- gld_yv12 = (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf :
++ gld_yv12 = (scaled_idx != ref_idx) ? &pool->frame_bufs[scaled_idx].buf :
+ get_ref_frame_buffer(cpi, GOLDEN_FRAME);
+ } else {
+ gld_yv12 = NULL;
}
- // Disable golden frame for svc first pass for now.
- gld_yv12 = NULL;
- set_ref_ptrs(cm, xd, ref_frame, NONE);
+ recon_y_stride = new_yv12->y_stride;
+ recon_uv_stride = new_yv12->uv_stride;
+ uv_mb_height = 16 >> (new_yv12->y_height > new_yv12->uv_height);
+
+ set_ref_ptrs(cm, xd,
+ (cpi->ref_frame_flags & VP9_LAST_FLAG) ? LAST_FRAME: NONE,
+ (cpi->ref_frame_flags & VP9_GOLD_FLAG) ? GOLDEN_FRAME : NONE);
cpi->Source = vp9_scale_if_required(cm, cpi->un_scaled_source,
&cpi->scaled_source);
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
sf, sf);
- if (cm->coding_use_prev_mi)
- vp9_find_mv_refs(cm, xd, tile, xd->mi[0], ref_frame,
+ if (cm->use_prev_frame_mvs)
+ vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
- candidates, mi_row, mi_col);
+ candidates, mi_row, mi_col, NULL, NULL);
else
- const_motion[ref_frame] = mv_refs_rt(cm, xd, tile, xd->mi[0],
+ const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
+ xd->mi[0].src_mi,
ref_frame, candidates,
mi_row, mi_col);
}
}
-#if CONFIG_DENOISING
- vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col, bsize);
-#endif
+ *rd_cost = best_rdc;
+}
- return INT64_MAX;
+void vp9_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
+ TileDataEnc *tile_data,
+ int mi_row, int mi_col, RD_COST *rd_cost,
+ BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
+ VP9_COMMON *const cm = &cpi->common;
+ TileInfo *const tile_info = &tile_data->tile_info;
+ SPEED_FEATURES *const sf = &cpi->sf;
+ MACROBLOCKD *const xd = &x->e_mbd;
+ MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const struct segmentation *const seg = &cm->seg;
+ MV_REFERENCE_FRAME ref_frame, second_ref_frame = NONE;
+ MV_REFERENCE_FRAME best_ref_frame = NONE;
+ unsigned char segment_id = mbmi->segment_id;
+ struct buf_2d yv12_mb[4][MAX_MB_PLANE];
+ static const int flag_list[4] = { 0, VP9_LAST_FLAG, VP9_GOLD_FLAG,
+ VP9_ALT_FLAG };
+ int64_t best_rd = INT64_MAX;
+ b_mode_info bsi[MAX_REF_FRAMES][4];
+ int ref_frame_skip_mask = 0;
+ const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
+ const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
+ int idx, idy;
+
+ x->skip_encode = sf->skip_encode_frame && x->q_index < QIDX_SKIP_THRESH;
+ ctx->pred_pixel_ready = 0;
+
+ for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
+ int_mv dummy_mv[2];
+ x->pred_mv_sad[ref_frame] = INT_MAX;
+
+ if (cpi->ref_frame_flags & flag_list[ref_frame]) {
+ const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
+ int_mv *const candidates = mbmi->ref_mvs[ref_frame];
+ const struct scale_factors *const sf =
+ &cm->frame_refs[ref_frame - 1].sf;
+ vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
+ sf, sf);
+ vp9_find_mv_refs(cm, xd, tile_info, xd->mi[0].src_mi, ref_frame,
- candidates, mi_row, mi_col);
++ candidates, mi_row, mi_col, NULL, NULL);
+
+ vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv, candidates,
+ &dummy_mv[0], &dummy_mv[1]);
+ } else {
+ ref_frame_skip_mask |= (1 << ref_frame);
+ }
+ }
+
+ mbmi->sb_type = bsize;
+ mbmi->tx_size = TX_4X4;
+ mbmi->uv_mode = DC_PRED;
+ mbmi->ref_frame[0] = LAST_FRAME;
+ mbmi->ref_frame[1] = NONE;
+ mbmi->interp_filter = cm->interp_filter == SWITCHABLE ? EIGHTTAP
+ : cm->interp_filter;
+
+ for (ref_frame = LAST_FRAME; ref_frame <= GOLDEN_FRAME; ++ref_frame) {
+ int64_t this_rd = 0;
+ int plane;
+
+ if (ref_frame_skip_mask & (1 << ref_frame))
+ continue;
+
+ // TODO(jingning, agrange): Scaling reference frame not supported for
+ // sub8x8 blocks. Is this supported now?
+ if (ref_frame > INTRA_FRAME &&
+ vp9_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+ continue;
+
+ // If the segment reference frame feature is enabled,
+ // then do nothing if the current ref frame is not allowed.
+ if (vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME) &&
+ vp9_get_segdata(seg, segment_id, SEG_LVL_REF_FRAME) != (int)ref_frame)
+ continue;
+
+ mbmi->ref_frame[0] = ref_frame;
+ x->skip = 0;
+ set_ref_ptrs(cm, xd, ref_frame, second_ref_frame);
+
+ // Select prediction reference frames.
+ for (plane = 0; plane < MAX_MB_PLANE; plane++)
+ xd->plane[plane].pre[0] = yv12_mb[ref_frame][plane];
+
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ int_mv b_mv[MB_MODE_COUNT];
+ int64_t b_best_rd = INT64_MAX;
+ const int i = idy * 2 + idx;
+ PREDICTION_MODE this_mode;
+ int b_rate = 0;
+ int64_t b_dist = 0;
+ RD_COST this_rdc;
+ unsigned int var_y, sse_y;
+
+ struct macroblock_plane *p = &x->plane[0];
+ struct macroblockd_plane *pd = &xd->plane[0];
+
+ const struct buf_2d orig_src = p->src;
+ const struct buf_2d orig_dst = pd->dst;
+ struct buf_2d orig_pre[2];
+ vpx_memcpy(orig_pre, xd->plane[0].pre, sizeof(orig_pre));
+
+ // Set buffer pointers for sub8x8 motion search.
+ p->src.buf =
+ &p->src.buf[vp9_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+ pd->dst.buf =
+ &pd->dst.buf[vp9_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+ pd->pre[0].buf =
+ &pd->pre[0].buf[vp9_raster_block_offset(BLOCK_8X8,
+ i, pd->pre[0].stride)];
+
+ b_mv[ZEROMV].as_int = 0;
+ b_mv[NEWMV].as_int = INVALID_MV;
+ vp9_append_sub8x8_mvs_for_idx(cm, xd, tile_info, i, 0, mi_row, mi_col,
+ &b_mv[NEARESTMV],
+ &b_mv[NEARMV]);
+
+ for (this_mode = NEARESTMV; this_mode <= NEWMV; ++this_mode) {
+ xd->mi[0].bmi[i].as_mv[0].as_int = b_mv[this_mode].as_int;
+
+ if (this_mode == NEWMV) {
+ const int step_param = cpi->sf.mv.fullpel_search_step_param;
+ MV mvp_full;
+ MV tmp_mv;
+ int cost_list[5];
+ const int tmp_col_min = x->mv_col_min;
+ const int tmp_col_max = x->mv_col_max;
+ const int tmp_row_min = x->mv_row_min;
+ const int tmp_row_max = x->mv_row_max;
+ int dummy_dist;
+
+ if (i == 0) {
+ mvp_full.row = b_mv[NEARESTMV].as_mv.row >> 3;
+ mvp_full.col = b_mv[NEARESTMV].as_mv.col >> 3;
+ } else {
+ mvp_full.row = xd->mi[0].bmi[0].as_mv[0].as_mv.row >> 3;
+ mvp_full.col = xd->mi[0].bmi[0].as_mv[0].as_mv.col >> 3;
+ }
+
+ vp9_set_mv_search_range(x, &mbmi->ref_mvs[0]->as_mv);
+
+ vp9_full_pixel_search(
+ cpi, x, bsize, &mvp_full, step_param, x->sadperbit4,
+ cond_cost_list(cpi, cost_list),
+ &mbmi->ref_mvs[ref_frame][0].as_mv, &tmp_mv,
+ INT_MAX, 0);
+
+ x->mv_col_min = tmp_col_min;
+ x->mv_col_max = tmp_col_max;
+ x->mv_row_min = tmp_row_min;
+ x->mv_row_max = tmp_row_max;
+
+ // Calculate the bit cost of the motion vector.
+ mvp_full.row = tmp_mv.row * 8;
+ mvp_full.col = tmp_mv.col * 8;
+
+ b_rate += vp9_mv_bit_cost(&mvp_full,
+ &mbmi->ref_mvs[ref_frame][0].as_mv,
+ x->nmvjointcost, x->mvcost,
+ MV_COST_WEIGHT);
+
+ b_rate += cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
+ [INTER_OFFSET(NEWMV)];
+ if (RDCOST(x->rdmult, x->rddiv, b_rate, 0) > b_best_rd)
+ continue;
+
+ cpi->find_fractional_mv_step(x, &tmp_mv,
+ &mbmi->ref_mvs[ref_frame][0].as_mv,
+ cpi->common.allow_high_precision_mv,
+ x->errorperbit,
+ &cpi->fn_ptr[bsize],
+ cpi->sf.mv.subpel_force_stop,
+ cpi->sf.mv.subpel_iters_per_step,
+ cond_cost_list(cpi, cost_list),
+ x->nmvjointcost, x->mvcost,
+ &dummy_dist,
+ &x->pred_sse[ref_frame], NULL, 0, 0);
+
+ xd->mi[0].bmi[i].as_mv[0].as_mv = tmp_mv;
+ }
+
+ vp9_build_inter_predictor(pd->pre[0].buf, pd->pre[0].stride,
+ pd->dst.buf, pd->dst.stride,
+ &xd->mi[0].bmi[i].as_mv[0].as_mv,
+ &xd->block_refs[0]->sf,
+ 4 * num_4x4_blocks_wide,
+ 4 * num_4x4_blocks_high, 0,
+ vp9_get_interp_kernel(mbmi->interp_filter),
+ MV_PRECISION_Q3,
+ mi_col * MI_SIZE + 4 * (i & 0x01),
+ mi_row * MI_SIZE + 4 * (i >> 1));
+ model_rd_for_sb_y(cpi, bsize, x, xd, &this_rdc.rate, &this_rdc.dist,
+ &var_y, &sse_y);
+
+ this_rdc.rate += b_rate;
+ this_rdc.dist += b_dist;
+ this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
+ this_rdc.rate, this_rdc.dist);
+ if (this_rdc.rdcost < b_best_rd) {
+ b_best_rd = this_rdc.rdcost;
+ bsi[ref_frame][i].as_mode = this_mode;
+ bsi[ref_frame][i].as_mv[0].as_mv = xd->mi[0].bmi[i].as_mv[0].as_mv;
+ }
+ } // mode search
+
+ // Restore source and prediction buffer pointers.
+ p->src = orig_src;
+ pd->pre[0] = orig_pre[0];
+ pd->dst = orig_dst;
+ this_rd += b_best_rd;
+
+ xd->mi[0].bmi[i] = bsi[ref_frame][i];
+ if (num_4x4_blocks_wide > 1)
+ xd->mi[0].bmi[i + 1] = xd->mi[0].bmi[i];
+ if (num_4x4_blocks_high > 1)
+ xd->mi[0].bmi[i + 2] = xd->mi[0].bmi[i];
+ }
+ } // loop through sub8x8 blocks
+
+ if (this_rd < best_rd) {
+ best_rd = this_rd;
+ best_ref_frame = ref_frame;
+ }
+ } // reference frames
+
+ mbmi->tx_size = TX_4X4;
+ mbmi->ref_frame[0] = best_ref_frame;
+ for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
+ for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
+ const int block = idy * 2 + idx;
+ xd->mi[0].bmi[block] = bsi[best_ref_frame][block];
+ if (num_4x4_blocks_wide > 1)
+ xd->mi[0].bmi[block + 1] = bsi[best_ref_frame][block];
+ if (num_4x4_blocks_high > 1)
+ xd->mi[0].bmi[block + 2] = bsi[best_ref_frame][block];
+ }
+ }
+ mbmi->mode = xd->mi[0].bmi[3].as_mode;
+ ctx->mic = *(xd->mi[0].src_mi);
+ ctx->skip_txfm[0] = 0;
+ ctx->skip = 0;
+ // Dummy assignment for speed -5. No effect in speed -6.
+ rd_cost->rdcost = best_rd;
}
--- /dev/null
- return (scaled_idx != ref_idx) ? &cm->frame_bufs[scaled_idx].buf : NULL;
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+
+#include "./vp9_rtcd.h"
+
+#include "vpx_mem/vpx_mem.h"
+
+#include "vp9/common/vp9_common.h"
+#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_entropymode.h"
+#include "vp9/common/vp9_mvref_common.h"
+#include "vp9/common/vp9_pred_common.h"
+#include "vp9/common/vp9_quant_common.h"
+#include "vp9/common/vp9_reconinter.h"
+#include "vp9/common/vp9_reconintra.h"
+#include "vp9/common/vp9_seg_common.h"
+#include "vp9/common/vp9_systemdependent.h"
+
+#include "vp9/encoder/vp9_cost.h"
+#include "vp9/encoder/vp9_encodemb.h"
+#include "vp9/encoder/vp9_encodemv.h"
+#include "vp9/encoder/vp9_encoder.h"
+#include "vp9/encoder/vp9_mcomp.h"
+#include "vp9/encoder/vp9_quantize.h"
+#include "vp9/encoder/vp9_ratectrl.h"
+#include "vp9/encoder/vp9_rd.h"
+#include "vp9/encoder/vp9_tokenize.h"
+#include "vp9/encoder/vp9_variance.h"
+
+#define RD_THRESH_POW 1.25
+#define RD_MULT_EPB_RATIO 64
+
+// Factor to weigh the rate for switchable interp filters.
+#define SWITCHABLE_INTERP_RATE_FACTOR 1
+
+void vp9_rd_cost_reset(RD_COST *rd_cost) {
+ rd_cost->rate = INT_MAX;
+ rd_cost->dist = INT64_MAX;
+ rd_cost->rdcost = INT64_MAX;
+}
+
+void vp9_rd_cost_init(RD_COST *rd_cost) {
+ rd_cost->rate = 0;
+ rd_cost->dist = 0;
+ rd_cost->rdcost = 0;
+}
+
+// The baseline rd thresholds for breaking out of the rd loop for
+// certain modes are assumed to be based on 8x8 blocks.
+// This table is used to correct for block size.
+// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
+static const uint8_t rd_thresh_block_size_factor[BLOCK_SIZES] = {
+ 2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32
+};
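+
+// For example, BLOCK_8X8 (index 3) has factor 4, i.e. x1 for the 8x8
+// baseline, while BLOCK_4X4 has factor 2 (x0.5) and BLOCK_64X64 has
+// factor 32 (x8); set_block_thresholds() divides the resulting product
+// by 4 to undo the << 2 scaling.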
+
+static void fill_mode_costs(VP9_COMP *cpi) {
+ const FRAME_CONTEXT *const fc = cpi->common.fc;
+ int i, j;
+
+ for (i = 0; i < INTRA_MODES; ++i)
+ for (j = 0; j < INTRA_MODES; ++j)
+ vp9_cost_tokens(cpi->y_mode_costs[i][j], vp9_kf_y_mode_prob[i][j],
+ vp9_intra_mode_tree);
+
+ vp9_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp9_intra_mode_tree);
+ vp9_cost_tokens(cpi->intra_uv_mode_cost[KEY_FRAME],
+ vp9_kf_uv_mode_prob[TM_PRED], vp9_intra_mode_tree);
+ vp9_cost_tokens(cpi->intra_uv_mode_cost[INTER_FRAME],
+ fc->uv_mode_prob[TM_PRED], vp9_intra_mode_tree);
+
+ for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
+ vp9_cost_tokens(cpi->switchable_interp_costs[i],
+ fc->switchable_interp_prob[i], vp9_switchable_interp_tree);
+}
+
+static void fill_token_costs(vp9_coeff_cost *c,
+ vp9_coeff_probs_model (*p)[PLANE_TYPES]) {
+ int i, j, k, l;
+ TX_SIZE t;
+ for (t = TX_4X4; t <= TX_32X32; ++t)
+ for (i = 0; i < PLANE_TYPES; ++i)
+ for (j = 0; j < REF_TYPES; ++j)
+ for (k = 0; k < COEF_BANDS; ++k)
+ for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
+ vp9_prob probs[ENTROPY_NODES];
+ vp9_model_to_full_probs(p[t][i][j][k][l], probs);
+ vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs,
+ vp9_coef_tree);
+ vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+ vp9_coef_tree);
+ assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
+ c[t][i][j][k][1][l][EOB_TOKEN]);
+ }
+}
+
+// Values are now correlated to quantizer.
+static int sad_per_bit16lut_8[QINDEX_RANGE];
+static int sad_per_bit4lut_8[QINDEX_RANGE];
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static int sad_per_bit16lut_10[QINDEX_RANGE];
+static int sad_per_bit4lut_10[QINDEX_RANGE];
+static int sad_per_bit16lut_12[QINDEX_RANGE];
+static int sad_per_bit4lut_12[QINDEX_RANGE];
+#endif
+
+static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range,
+ vpx_bit_depth_t bit_depth) {
+ int i;
+ // Initialize the sad lut tables using a formulaic calculation for now.
+ // This is to make it easier to resolve the impact of experimental changes
+ // to the quantizer tables.
+ for (i = 0; i < range; i++) {
+ const double q = vp9_convert_qindex_to_q(i, bit_depth);
+ bit16lut[i] = (int)(0.0418 * q + 2.4107);
+ bit4lut[i] = (int)(0.063 * q + 2.742);
+ }
+}
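+
+// Worked example of the formulae above: if vp9_convert_qindex_to_q()
+// returns q = 100.0, then bit16lut = (int)(0.0418 * 100.0 + 2.4107) = 6
+// and bit4lut = (int)(0.063 * 100.0 + 2.742) = 9.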
+
+void vp9_init_me_luts() {
+ init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
+ VPX_BITS_8);
+#if CONFIG_VP9_HIGHBITDEPTH
+ init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
+ VPX_BITS_10);
+ init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
+ VPX_BITS_12);
+#endif
+}
+
+static const int rd_boost_factor[16] = {
+ 64, 32, 32, 32, 24, 16, 12, 12,
+ 8, 8, 4, 4, 2, 2, 1, 0
+};
+static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = {
+ 128, 144, 128, 128, 144
+};
+
+int vp9_compute_rd_mult(const VP9_COMP *cpi, int qindex) {
+ const int64_t q = vp9_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_VP9_HIGHBITDEPTH
+ int64_t rdmult = 0;
+ switch (cpi->common.bit_depth) {
+ case VPX_BITS_8:
+ rdmult = 88 * q * q / 24;
+ break;
+ case VPX_BITS_10:
+ rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 4);
+ break;
+ case VPX_BITS_12:
+ rdmult = ROUND_POWER_OF_TWO(88 * q * q / 24, 8);
+ break;
+ default:
+ assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ return -1;
+ }
+#else
+ int64_t rdmult = 88 * q * q / 24;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
+ const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
+ const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
+ const int boost_index = MIN(15, (cpi->rc.gfu_boost / 100));
+
+ rdmult = (rdmult * rd_frame_type_factor[frame_type]) >> 7;
+ rdmult += ((rdmult * rd_boost_factor[boost_index]) >> 7);
+ }
+ return (int)rdmult;
+}
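+
+// Worked example: with 8-bit depth and a DC quant step q of 24,
+// rdmult = 88 * 24 * 24 / 24 = 2112. For a two-pass non-key frame whose
+// update type maps to a factor of 144 and with gfu_boost = 400
+// (boost_index = 4, rd_boost_factor[4] = 24), this becomes
+// (2112 * 144) >> 7 = 2376 plus (2376 * 24) >> 7 = 445, i.e. 2821.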
+
+static int compute_rd_thresh_factor(int qindex, vpx_bit_depth_t bit_depth) {
+ double q;
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth) {
+ case VPX_BITS_8:
+ q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
+ break;
+ case VPX_BITS_10:
+ q = vp9_dc_quant(qindex, 0, VPX_BITS_10) / 16.0;
+ break;
+ case VPX_BITS_12:
+ q = vp9_dc_quant(qindex, 0, VPX_BITS_12) / 64.0;
+ break;
+ default:
+ assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ return -1;
+ }
+#else
+ (void) bit_depth;
+ q = vp9_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+ // TODO(debargha): Adjust the function below.
+ return MAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
+}
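+
+// Worked example (8-bit case): if vp9_dc_quant() returns 64 then
+// q = 64 / 4.0 = 16.0 and the factor is
+// MAX((int)(pow(16.0, 1.25) * 5.12), 8) = MAX(163, 8) = 163.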
+
+void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) {
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (cpi->common.bit_depth) {
+ case VPX_BITS_8:
+ cpi->td.mb.sadperbit16 = sad_per_bit16lut_8[qindex];
+ cpi->td.mb.sadperbit4 = sad_per_bit4lut_8[qindex];
+ break;
+ case VPX_BITS_10:
+ cpi->td.mb.sadperbit16 = sad_per_bit16lut_10[qindex];
+ cpi->td.mb.sadperbit4 = sad_per_bit4lut_10[qindex];
+ break;
+ case VPX_BITS_12:
+ cpi->td.mb.sadperbit16 = sad_per_bit16lut_12[qindex];
+ cpi->td.mb.sadperbit4 = sad_per_bit4lut_12[qindex];
+ break;
+ default:
+ assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ }
+#else
+ cpi->td.mb.sadperbit16 = sad_per_bit16lut_8[qindex];
+ cpi->td.mb.sadperbit4 = sad_per_bit4lut_8[qindex];
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
+static void set_block_thresholds(const VP9_COMMON *cm, RD_OPT *rd) {
+ int i, bsize, segment_id;
+
+ for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
+ const int qindex =
+ clamp(vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+ cm->y_dc_delta_q, 0, MAXQ);
+ const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
+
+ for (bsize = 0; bsize < BLOCK_SIZES; ++bsize) {
+ // Threshold here seems unnecessarily harsh but fine given actual
+ // range of values used for cpi->sf.thresh_mult[].
+ const int t = q * rd_thresh_block_size_factor[bsize];
+ const int thresh_max = INT_MAX / t;
+
+ if (bsize >= BLOCK_8X8) {
+ for (i = 0; i < MAX_MODES; ++i)
+ rd->threshes[segment_id][bsize][i] =
+ rd->thresh_mult[i] < thresh_max
+ ? rd->thresh_mult[i] * t / 4
+ : INT_MAX;
+ } else {
+ for (i = 0; i < MAX_REFS; ++i)
+ rd->threshes[segment_id][bsize][i] =
+ rd->thresh_mult_sub8x8[i] < thresh_max
+ ? rd->thresh_mult_sub8x8[i] * t / 4
+ : INT_MAX;
+ }
+ }
+ }
+}
+
+void vp9_initialize_rd_consts(VP9_COMP *cpi) {
+ VP9_COMMON *const cm = &cpi->common;
+ MACROBLOCK *const x = &cpi->td.mb;
+ RD_OPT *const rd = &cpi->rd;
+ int i;
+
+ vp9_clear_system_state();
+
+ rd->RDDIV = RDDIV_BITS; // In bits (to multiply D by 128).
+ rd->RDMULT = vp9_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+
+ x->errorperbit = rd->RDMULT / RD_MULT_EPB_RATIO;
+ x->errorperbit += (x->errorperbit == 0);
+
+ x->select_tx_size = (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
+ cm->frame_type != KEY_FRAME) ? 0 : 1;
+
+ set_block_thresholds(cm, rd);
+
+ if (!cpi->sf.use_nonrd_pick_mode || cm->frame_type == KEY_FRAME) {
+ fill_token_costs(x->token_costs, cm->fc->coef_probs);
+
+ for (i = 0; i < PARTITION_CONTEXTS; ++i)
+ vp9_cost_tokens(cpi->partition_cost[i], get_partition_probs(cm, i),
+ vp9_partition_tree);
+ }
+
+ if (!cpi->sf.use_nonrd_pick_mode || (cm->current_video_frame & 0x07) == 1 ||
+ cm->frame_type == KEY_FRAME) {
+ fill_mode_costs(cpi);
+
+ if (!frame_is_intra_only(cm)) {
+ vp9_build_nmv_cost_table(x->nmvjointcost,
+ cm->allow_high_precision_mv ? x->nmvcost_hp
+ : x->nmvcost,
+ &cm->fc->nmvc, cm->allow_high_precision_mv);
+
+ for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
+ vp9_cost_tokens((int *)cpi->inter_mode_cost[i],
+ cm->fc->inter_mode_probs[i], vp9_inter_mode_tree);
+ }
+ }
+}
+
+static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
+ // NOTE: The tables below must be of the same size.
+
+ // The functions described below are sampled at the four most significant
+ // bits of x^2 + 8 / 256.
+
+ // Normalized rate:
+ // This table models the rate for a Laplacian source with given variance
+ // when quantized with a uniform quantizer with given stepsize. The
+ // closed form expression is:
+ // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)],
+ // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
+ // and H(x) is the binary entropy function.
+ static const int rate_tab_q10[] = {
+ 65536, 6086, 5574, 5275, 5063, 4899, 4764, 4651,
+ 4553, 4389, 4255, 4142, 4044, 3958, 3881, 3811,
+ 3748, 3635, 3538, 3453, 3376, 3307, 3244, 3186,
+ 3133, 3037, 2952, 2877, 2809, 2747, 2690, 2638,
+ 2589, 2501, 2423, 2353, 2290, 2232, 2179, 2130,
+ 2084, 2001, 1928, 1862, 1802, 1748, 1698, 1651,
+ 1608, 1530, 1460, 1398, 1342, 1290, 1243, 1199,
+ 1159, 1086, 1021, 963, 911, 864, 821, 781,
+ 745, 680, 623, 574, 530, 490, 455, 424,
+ 395, 345, 304, 269, 239, 213, 190, 171,
+ 154, 126, 104, 87, 73, 61, 52, 44,
+ 38, 28, 21, 16, 12, 10, 8, 6,
+ 5, 3, 2, 1, 1, 1, 0, 0,
+ };
+ // Normalized distortion:
+ // This table models the normalized distortion for a Laplacian source
+ // with given variance when quantized with a uniform quantizer
+ // with given stepsize. The closed form expression is:
+ // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2))
+ // where x = qpstep / sqrt(variance).
+ // Note the actual distortion is Dn * variance.
+ static const int dist_tab_q10[] = {
+ 0, 0, 1, 1, 1, 2, 2, 2,
+ 3, 3, 4, 5, 5, 6, 7, 7,
+ 8, 9, 11, 12, 13, 15, 16, 17,
+ 18, 21, 24, 26, 29, 31, 34, 36,
+ 39, 44, 49, 54, 59, 64, 69, 73,
+ 78, 88, 97, 106, 115, 124, 133, 142,
+ 151, 167, 184, 200, 215, 231, 245, 260,
+ 274, 301, 327, 351, 375, 397, 418, 439,
+ 458, 495, 528, 559, 587, 613, 637, 659,
+ 680, 717, 749, 777, 801, 823, 842, 859,
+ 874, 899, 919, 936, 949, 960, 969, 977,
+ 983, 994, 1001, 1006, 1010, 1013, 1015, 1017,
+ 1018, 1020, 1022, 1022, 1023, 1023, 1023, 1024,
+ };
+ static const int xsq_iq_q10[] = {
+ 0, 4, 8, 12, 16, 20, 24, 28,
+ 32, 40, 48, 56, 64, 72, 80, 88,
+ 96, 112, 128, 144, 160, 176, 192, 208,
+ 224, 256, 288, 320, 352, 384, 416, 448,
+ 480, 544, 608, 672, 736, 800, 864, 928,
+ 992, 1120, 1248, 1376, 1504, 1632, 1760, 1888,
+ 2016, 2272, 2528, 2784, 3040, 3296, 3552, 3808,
+ 4064, 4576, 5088, 5600, 6112, 6624, 7136, 7648,
+ 8160, 9184, 10208, 11232, 12256, 13280, 14304, 15328,
+ 16352, 18400, 20448, 22496, 24544, 26592, 28640, 30688,
+ 32736, 36832, 40928, 45024, 49120, 53216, 57312, 61408,
+ 65504, 73696, 81888, 90080, 98272, 106464, 114656, 122848,
+ 131040, 147424, 163808, 180192, 196576, 212960, 229344, 245728,
+ };
+ const int tmp = (xsq_q10 >> 2) + 8;
+ const int k = get_msb(tmp) - 3;
+ const int xq = (k << 3) + ((tmp >> k) & 0x7);
+ const int one_q10 = 1 << 10;
+ const int a_q10 = ((xsq_q10 - xsq_iq_q10[xq]) << 10) >> (2 + k);
+ const int b_q10 = one_q10 - a_q10;
+ *r_q10 = (rate_tab_q10[xq] * b_q10 + rate_tab_q10[xq + 1] * a_q10) >> 10;
+ *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
+}
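+
+// Example lookup: for xsq_q10 = 1024 (x^2 = 1.0 in Q10), tmp = 264,
+// k = get_msb(264) - 3 = 5 and xq = (5 << 3) + ((264 >> 5) & 0x7) = 40,
+// so the input falls between xsq_iq_q10[40] = 992 and xsq_iq_q10[41] = 1120;
+// with a_q10 = ((1024 - 992) << 10) >> 7 = 256, the rate and distortion are
+// linearly interpolated between table entries 40 and 41.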
+
+void vp9_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
+ unsigned int qstep, int *rate,
+ int64_t *dist) {
+ // This function models the rate and distortion for a Laplacian
+ // source with given variance when quantized with a uniform quantizer
+ // with given stepsize. The closed form expressions are in:
+ // Hang and Chen, "Source Model for transform video coder and its
+ // application - Part I: Fundamental Theory", IEEE Trans. Circ.
+ // Sys. for Video Tech., April 1997.
+ if (var == 0) {
+ *rate = 0;
+ *dist = 0;
+ } else {
+ int d_q10, r_q10;
+ static const uint32_t MAX_XSQ_Q10 = 245727;
+ const uint64_t xsq_q10_64 =
+ (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var;
+ const int xsq_q10 = (int)MIN(xsq_q10_64, MAX_XSQ_Q10);
+ model_rd_norm(xsq_q10, &r_q10, &d_q10);
+ *rate = ((r_q10 << n_log2) + 2) >> 2;
+ *dist = (var * (int64_t)d_q10 + 512) >> 10;
+ }
+}
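+
+// Example: for a 4x4 block (n_log2 = 4) with qstep = 16 and var = 4096,
+// xsq_q10 = ((16 * 16 << 14) + 2048) / 4096 = 1024, i.e. x^2 = 1.0,
+// which is then passed to model_rd_norm() above.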
+
+void vp9_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+ const struct macroblockd_plane *pd,
+ ENTROPY_CONTEXT t_above[16],
+ ENTROPY_CONTEXT t_left[16]) {
+ const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
+ const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
+ const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
+ const ENTROPY_CONTEXT *const above = pd->above_context;
+ const ENTROPY_CONTEXT *const left = pd->left_context;
+
+ int i;
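+ // For transforms larger than 4x4, the per-4x4 contexts are collapsed into
+ // a single nonzero flag per transform block: two bytes are tested at once
+ // for TX_8X8, four for TX_16X16 and eight for TX_32X32.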
+ switch (tx_size) {
+ case TX_4X4:
+ vpx_memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
+ vpx_memcpy(t_left, left, sizeof(ENTROPY_CONTEXT) * num_4x4_h);
+ break;
+ case TX_8X8:
+ for (i = 0; i < num_4x4_w; i += 2)
+ t_above[i] = !!*(const uint16_t *)&above[i];
+ for (i = 0; i < num_4x4_h; i += 2)
+ t_left[i] = !!*(const uint16_t *)&left[i];
+ break;
+ case TX_16X16:
+ for (i = 0; i < num_4x4_w; i += 4)
+ t_above[i] = !!*(const uint32_t *)&above[i];
+ for (i = 0; i < num_4x4_h; i += 4)
+ t_left[i] = !!*(const uint32_t *)&left[i];
+ break;
+ case TX_32X32:
+ for (i = 0; i < num_4x4_w; i += 8)
+ t_above[i] = !!*(const uint64_t *)&above[i];
+ for (i = 0; i < num_4x4_h; i += 8)
+ t_left[i] = !!*(const uint64_t *)&left[i];
+ break;
+ default:
+ assert(0 && "Invalid transform size.");
+ break;
+ }
+}
+
+void vp9_mv_pred(VP9_COMP *cpi, MACROBLOCK *x,
+ uint8_t *ref_y_buffer, int ref_y_stride,
+ int ref_frame, BLOCK_SIZE block_size) {
+ MACROBLOCKD *xd = &x->e_mbd;
+ MB_MODE_INFO *mbmi = &xd->mi[0].src_mi->mbmi;
+ int i;
+ int zero_seen = 0;
+ int best_index = 0;
+ int best_sad = INT_MAX;
+ int this_sad = INT_MAX;
+ int max_mv = 0;
+ uint8_t *src_y_ptr = x->plane[0].src.buf;
+ uint8_t *ref_y_ptr;
+ const int num_mv_refs = MAX_MV_REF_CANDIDATES +
+ (cpi->sf.adaptive_motion_search &&
+ block_size < x->max_partition_size);
+
+ MV pred_mv[3];
+ pred_mv[0] = mbmi->ref_mvs[ref_frame][0].as_mv;
+ pred_mv[1] = mbmi->ref_mvs[ref_frame][1].as_mv;
+ pred_mv[2] = x->pred_mv[ref_frame];
+
+ // Get the sad for each candidate reference mv.
+ for (i = 0; i < num_mv_refs; ++i) {
+ const MV *this_mv = &pred_mv[i];
+
+ max_mv = MAX(max_mv, MAX(abs(this_mv->row), abs(this_mv->col)) >> 3);
+ if (is_zero_mv(this_mv) && zero_seen)
+ continue;
+
+ zero_seen |= is_zero_mv(this_mv);
+
+ ref_y_ptr =
+ &ref_y_buffer[ref_y_stride * (this_mv->row >> 3) + (this_mv->col >> 3)];
+
+ // Find sad for current vector.
+ this_sad = cpi->fn_ptr[block_size].sdf(src_y_ptr, x->plane[0].src.stride,
+ ref_y_ptr, ref_y_stride);
+
+ // Note if it is the best so far.
+ if (this_sad < best_sad) {
+ best_sad = this_sad;
+ best_index = i;
+ }
+ }
+
+ // Note the index of the mv that worked best in the reference list.
+ x->mv_best_ref_index[ref_frame] = best_index;
+ x->max_mv_context[ref_frame] = max_mv;
+ x->pred_mv_sad[ref_frame] = best_sad;
+}
+
+void vp9_setup_pred_block(const MACROBLOCKD *xd,
+ struct buf_2d dst[MAX_MB_PLANE],
+ const YV12_BUFFER_CONFIG *src,
+ int mi_row, int mi_col,
+ const struct scale_factors *scale,
+ const struct scale_factors *scale_uv) {
+ int i;
+
+ dst[0].buf = src->y_buffer;
+ dst[0].stride = src->y_stride;
+ dst[1].buf = src->u_buffer;
+ dst[2].buf = src->v_buffer;
+ dst[1].stride = dst[2].stride = src->uv_stride;
+
+ for (i = 0; i < MAX_MB_PLANE; ++i) {
+ setup_pred_plane(dst + i, dst[i].buf, dst[i].stride, mi_row, mi_col,
+ i ? scale_uv : scale,
+ xd->plane[i].subsampling_x, xd->plane[i].subsampling_y);
+ }
+}
+
+int vp9_raster_block_offset(BLOCK_SIZE plane_bsize,
+ int raster_block, int stride) {
+ const int bw = b_width_log2_lookup[plane_bsize];
+ const int y = 4 * (raster_block >> bw);
+ const int x = 4 * (raster_block & ((1 << bw) - 1));
+ return y * stride + x;
+}
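+
+// Example: for BLOCK_8X8 the 4x4 raster blocks are numbered 0..3; with
+// raster_block = 3 and stride = 64, bw = 1, y = 4 * (3 >> 1) = 4 and
+// x = 4 * (3 & 1) = 4, giving an offset of 4 * 64 + 4 = 260.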
+
+int16_t* vp9_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+ int raster_block, int16_t *base) {
+ const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
+ return base + vp9_raster_block_offset(plane_bsize, raster_block, stride);
+}
+
+const YV12_BUFFER_CONFIG *vp9_get_scaled_ref_frame(const VP9_COMP *cpi,
+ int ref_frame) {
+ const VP9_COMMON *const cm = &cpi->common;
+ const int ref_idx = cm->ref_frame_map[get_ref_frame_idx(cpi, ref_frame)];
+ const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
++ return (scaled_idx != ref_idx) ?
++ &cm->buffer_pool->frame_bufs[scaled_idx].buf : NULL;
+}
+
+int vp9_get_switchable_rate(const VP9_COMP *cpi, const MACROBLOCKD *const xd) {
+ const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+ const int ctx = vp9_get_pred_context_switchable_interp(xd);
+ return SWITCHABLE_INTERP_RATE_FACTOR *
+ cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
+}
+
+void vp9_set_rd_speed_thresholds(VP9_COMP *cpi) {
+ int i;
+ RD_OPT *const rd = &cpi->rd;
+ SPEED_FEATURES *const sf = &cpi->sf;
+
+ // Set baseline threshold values.
+ for (i = 0; i < MAX_MODES; ++i)
+ rd->thresh_mult[i] = cpi->oxcf.mode == BEST ? -500 : 0;
+
+ if (sf->adaptive_rd_thresh) {
+ rd->thresh_mult[THR_NEARESTMV] = 300;
+ rd->thresh_mult[THR_NEARESTG] = 300;
+ rd->thresh_mult[THR_NEARESTA] = 300;
+ } else {
+ rd->thresh_mult[THR_NEARESTMV] = 0;
+ rd->thresh_mult[THR_NEARESTG] = 0;
+ rd->thresh_mult[THR_NEARESTA] = 0;
+ }
+
+ rd->thresh_mult[THR_DC] += 1000;
+
+ rd->thresh_mult[THR_NEWMV] += 1000;
+ rd->thresh_mult[THR_NEWA] += 1000;
+ rd->thresh_mult[THR_NEWG] += 1000;
+
+ // Adjust threshold only in real time mode, which only uses last
+ // reference frame.
+ rd->thresh_mult[THR_NEWMV] += sf->elevate_newmv_thresh;
+
+ rd->thresh_mult[THR_NEARMV] += 1000;
+ rd->thresh_mult[THR_NEARA] += 1000;
+ rd->thresh_mult[THR_COMP_NEARESTLA] += 1000;
+ rd->thresh_mult[THR_COMP_NEARESTGA] += 1000;
+
+ rd->thresh_mult[THR_TM] += 1000;
+
+ rd->thresh_mult[THR_COMP_NEARLA] += 1500;
+ rd->thresh_mult[THR_COMP_NEWLA] += 2000;
+ rd->thresh_mult[THR_NEARG] += 1000;
+ rd->thresh_mult[THR_COMP_NEARGA] += 1500;
+ rd->thresh_mult[THR_COMP_NEWGA] += 2000;
+
+ rd->thresh_mult[THR_ZEROMV] += 2000;
+ rd->thresh_mult[THR_ZEROG] += 2000;
+ rd->thresh_mult[THR_ZEROA] += 2000;
+ rd->thresh_mult[THR_COMP_ZEROLA] += 2500;
+ rd->thresh_mult[THR_COMP_ZEROGA] += 2500;
+
+ rd->thresh_mult[THR_H_PRED] += 2000;
+ rd->thresh_mult[THR_V_PRED] += 2000;
+ rd->thresh_mult[THR_D45_PRED ] += 2500;
+ rd->thresh_mult[THR_D135_PRED] += 2500;
+ rd->thresh_mult[THR_D117_PRED] += 2500;
+ rd->thresh_mult[THR_D153_PRED] += 2500;
+ rd->thresh_mult[THR_D207_PRED] += 2500;
+ rd->thresh_mult[THR_D63_PRED] += 2500;
+}
+
+void vp9_set_rd_speed_thresholds_sub8x8(VP9_COMP *cpi) {
+ static const int thresh_mult[2][MAX_REFS] =
+ {{2500, 2500, 2500, 4500, 4500, 2500},
+ {2000, 2000, 2000, 4000, 4000, 2000}};
+ RD_OPT *const rd = &cpi->rd;
+ const int idx = cpi->oxcf.mode == BEST;
+ vpx_memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx],
+ sizeof(thresh_mult[idx]));
+}
+
+void vp9_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
+ int bsize, int best_mode_index) {
+ if (rd_thresh > 0) {
+ const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
+ int mode;
+ for (mode = 0; mode < top_mode; ++mode) {
+ const BLOCK_SIZE min_size = MAX(bsize - 1, BLOCK_4X4);
+ const BLOCK_SIZE max_size = MIN(bsize + 2, BLOCK_64X64);
+ BLOCK_SIZE bs;
+ for (bs = min_size; bs <= max_size; ++bs) {
+ int *const fact = &factor_buf[bs][mode];
+ if (mode == best_mode_index) {
+ *fact -= (*fact >> 4);
+ } else {
+ *fact = MIN(*fact + RD_THRESH_INC,
+ rd_thresh * RD_THRESH_MAX_FACT);
+ }
+ }
+ }
+ }
+}
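+
+// The factor for the winning mode decays by 1/16th of its value on each
+// call (e.g. 32 becomes 30), while every other mode's factor grows by
+// RD_THRESH_INC until it saturates at rd_thresh * RD_THRESH_MAX_FACT; the
+// adjustment covers block sizes from one below to two above the current
+// bsize.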
+
+int vp9_get_intra_cost_penalty(int qindex, int qdelta,
+ vpx_bit_depth_t bit_depth) {
+ const int q = vp9_dc_quant(qindex, qdelta, bit_depth);
+#if CONFIG_VP9_HIGHBITDEPTH
+ switch (bit_depth) {
+ case VPX_BITS_8:
+ return 20 * q;
+ case VPX_BITS_10:
+ return 5 * q;
+ case VPX_BITS_12:
+ return ROUND_POWER_OF_TWO(5 * q, 2);
+ default:
+ assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
+ return -1;
+ }
+#else
+ return 20 * q;
+#endif // CONFIG_VP9_HIGHBITDEPTH
+}
+
size_t pending_frame_sizes[8];
size_t pending_frame_magnitude;
vpx_image_t preview_img;
+ vpx_enc_frame_flags_t next_frame_flags;
vp8_postproc_cfg_t preview_ppcfg;
- vpx_codec_pkt_list_decl(128) pkt_list;
+ vpx_codec_pkt_list_decl(256) pkt_list;
unsigned int fixed_kf_cntr;
+ vpx_codec_priv_output_cx_pkt_cb_pair_t output_cx_pkt_cb;
+ // BufferPool that holds all reference frames.
+ BufferPool *buffer_pool;
};
static VP9_REFFRAME ref_frame_to_vp9_reframe(vpx_ref_frame_type_t frame) {
if (priv == NULL)
return VPX_CODEC_MEM_ERROR;
- ctx->priv = &priv->base;
- ctx->priv->sz = sizeof(*ctx->priv);
- ctx->priv->iface = ctx->iface;
- ctx->priv->alg_priv = priv;
+ ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
ctx->priv->enc.total_encoders = 1;
- ctx->priv->alg_priv->buffer_pool =
++ priv->buffer_pool =
+ (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
- if (ctx->priv->alg_priv->buffer_pool == NULL)
++ if (priv->buffer_pool == NULL)
+ return VPX_CODEC_MEM_ERROR;
if (ctx->config.enc) {
// Update the reference to the config structure to an internal copy.
res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
if (res == VPX_CODEC_OK) {
- VP9_COMP *cpi;
- set_encoder_config(&ctx->priv->alg_priv->oxcf,
- &ctx->priv->alg_priv->cfg,
- &ctx->priv->alg_priv->extra_cfg);
- cpi = vp9_create_compressor(&ctx->priv->alg_priv->oxcf,
- ctx->priv->alg_priv->buffer_pool);
- if (cpi == NULL)
+ set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg);
+#if CONFIG_VP9_HIGHBITDEPTH
+ priv->oxcf.use_highbitdepth =
+ (ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
+#endif
- priv->cpi = vp9_create_compressor(&priv->oxcf);
++ priv->cpi = vp9_create_compressor(&priv->oxcf, priv->buffer_pool);
+ if (priv->cpi == NULL)
res = VPX_CODEC_MEM_ERROR;
else
- ctx->priv->alg_priv->cpi = cpi;
+ priv->cpi->output_pkt_list = &priv->pkt_list.head;
}
}
static vpx_codec_err_t encoder_destroy(vpx_codec_alg_priv_t *ctx) {
free(ctx->cx_data);
vp9_remove_compressor(ctx->cpi);
- free(ctx);
+ vpx_free(ctx->buffer_pool);
+ vpx_free(ctx);
return VPX_CODEC_OK;
}
#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"
++#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_frame_buffers.h"
+ #include "vp9/common/vp9_thread.h"
#include "vp9/decoder/vp9_decoder.h"
+#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/vp9_iface_common.h"
int postproc_cfg_set;
vp8_postproc_cfg_t postproc_cfg;
vpx_decrypt_cb decrypt_cb;
- void *decrypt_state;
+ void *decrypt_state;
vpx_image_t img;
+ int img_avail;
int flushed;
int invert_tile_order;
+ int last_show_frame; // Index of last output frame.
+
+ // Frame parallel related.
int frame_parallel_decode; // frame-based threading.
+ int byte_alignment;
+ VP9Worker *frame_workers;
+ int num_frame_workers;
+ int next_submit_worker_id;
+ int last_submit_worker_id;
+ int next_output_worker_id;
+ int available_threads;
+ cache_frame frame_cache[FRAME_CACHE_SIZE];
+ int frame_cache_write;
+ int frame_cache_read;
+ int num_cache_frames;
+
+ // BufferPool that holds all reference frames. Shared by all the FrameWorkers.
+ BufferPool *buffer_pool;
// External frame buffer info to save for VP9 common.
void *ext_priv; // Private data associated with the external frame buffers.
(void)data;
if (!ctx->priv) {
- vpx_codec_alg_priv_t *alg_priv = vpx_memalign(32, sizeof(*alg_priv));
- if (alg_priv == NULL)
+ vpx_codec_alg_priv_t *const priv = vpx_calloc(1, sizeof(*priv));
+ if (priv == NULL)
return VPX_CODEC_MEM_ERROR;
- vp9_zero(*alg_priv);
-
- ctx->priv = (vpx_codec_priv_t *)alg_priv;
- ctx->priv->sz = sizeof(*ctx->priv);
- ctx->priv->iface = ctx->iface;
- ctx->priv->alg_priv = alg_priv;
- ctx->priv->alg_priv->si.sz = sizeof(ctx->priv->alg_priv->si);
+ ctx->priv = (vpx_codec_priv_t *)priv;
ctx->priv->init_flags = ctx->init_flags;
-
- ctx->priv->alg_priv->flushed = 0;
+ priv->si.sz = sizeof(priv->si);
+ priv->flushed = 0;
+ priv->frame_parallel_decode =
+ (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING);
+ priv->frame_parallel_decode = 0; // Disable for now
++ priv->flushed = 0;
+ // Only do frame parallel decode when threads > 1.
- ctx->priv->alg_priv->frame_parallel_decode =
- ((ctx->config.dec->threads > 1) &&
++ priv->frame_parallel_decode =
++ (ctx->config.dec && (ctx->config.dec->threads > 1) &&
+ (ctx->init_flags & VPX_CODEC_USE_FRAME_THREADING)) ? 1 : 0;
if (ctx->config.dec) {
- // Update the reference to the config structure to an internal copy.
- ctx->priv->alg_priv->cfg = *ctx->config.dec;
- ctx->config.dec = &ctx->priv->alg_priv->cfg;
+ priv->cfg = *ctx->config.dec;
+ ctx->config.dec = &priv->cfg;
}
}
}
static vpx_codec_err_t decoder_destroy(vpx_codec_alg_priv_t *ctx) {
- if (ctx->pbi) {
- vp9_decoder_remove(ctx->pbi);
- ctx->pbi = NULL;
+ if (ctx->frame_workers != NULL) {
+ int i;
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ vp9_get_worker_interface()->end(worker);
++ vp9_remove_common(&frame_worker_data->pbi->common);
+ vp9_decoder_remove(frame_worker_data->pbi);
+ vpx_free(frame_worker_data->scratch_buffer);
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&frame_worker_data->stats_mutex);
+ pthread_cond_destroy(&frame_worker_data->stats_cond);
+ #endif
+ vpx_free(frame_worker_data);
+ }
+ #if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
+ #endif
- vp9_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
}
- vpx_free(ctx);
++ if (ctx->buffer_pool)
++ vp9_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+
+ vpx_free(ctx->frame_workers);
+ vpx_free(ctx->buffer_pool);
+ vpx_free(ctx);
-
return VPX_CODEC_OK;
}
}
static void init_buffer_callbacks(vpx_codec_alg_priv_t *ctx) {
- VP9_COMMON *const cm = &ctx->pbi->common;
+ int i;
- cm->new_fb_idx = -1;
- cm->byte_alignment = ctx->byte_alignment;
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ BufferPool *const pool = cm->buffer_pool;
- if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
- cm->get_fb_cb = ctx->get_ext_fb_cb;
- cm->release_fb_cb = ctx->release_ext_fb_cb;
- cm->cb_priv = ctx->ext_priv;
- } else {
- cm->get_fb_cb = vp9_get_frame_buffer;
- cm->release_fb_cb = vp9_release_frame_buffer;
+ cm->new_fb_idx = -1;
++ cm->byte_alignment = ctx->byte_alignment;
+
- if (vp9_alloc_internal_frame_buffers(&cm->int_frame_buffers))
- vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
- "Failed to initialize internal frame buffers");
+ if (ctx->get_ext_fb_cb != NULL && ctx->release_ext_fb_cb != NULL) {
+ pool->get_fb_cb = ctx->get_ext_fb_cb;
+ pool->release_fb_cb = ctx->release_ext_fb_cb;
+ pool->cb_priv = ctx->ext_priv;
+ } else {
+ pool->get_fb_cb = vp9_get_frame_buffer;
+ pool->release_fb_cb = vp9_release_frame_buffer;
- cm->cb_priv = &cm->int_frame_buffers;
+ if (vp9_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+ vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
+ "Failed to initialize internal frame buffers");
+
+ pool->cb_priv = &pool->int_frame_buffers;
+ }
}
}
flags->noise_level = ctx->postproc_cfg.noise_level;
}
- static void init_decoder(vpx_codec_alg_priv_t *ctx) {
- ctx->pbi = vp9_decoder_create();
- if (ctx->pbi == NULL)
- return;
+ static int frame_worker_hook(void *arg1, void *arg2) {
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)arg1;
+ const uint8_t *data = frame_worker_data->data;
+ (void)arg2;
+
+ frame_worker_data->result =
+ vp9_receive_compressed_data(frame_worker_data->pbi,
+ frame_worker_data->data_size,
+ &data);
+ frame_worker_data->data_end = data;
+
+ if (frame_worker_data->pbi->frame_parallel_decode) {
+ // In frame parallel decoding, a worker thread must successfully decode all
+ // the compressed data.
+ if (frame_worker_data->result != 0 ||
+ frame_worker_data->data + frame_worker_data->data_size - 1 > data) {
+ VP9Worker *const worker = frame_worker_data->pbi->frame_worker_owner;
+ BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
+ // Signal all the other threads that are waiting for this frame.
+ vp9_frameworker_lock_stats(worker);
+ frame_worker_data->frame_context_ready = 1;
+ lock_buffer_pool(pool);
+ frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
+ unlock_buffer_pool(pool);
+ frame_worker_data->pbi->need_resync = 1;
+ vp9_frameworker_signal_stats(worker);
+ vp9_frameworker_unlock_stats(worker);
+ return 0;
+ }
+ } else if (frame_worker_data->result != 0) {
+ // Check decode result in serial decode.
+ frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
+ frame_worker_data->pbi->need_resync = 1;
+ }
+
+ return !frame_worker_data->result;
+ }
+
+ static vpx_codec_err_t init_decoder(vpx_codec_alg_priv_t *ctx) {
+ int i;
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+
+ ctx->last_show_frame = -1;
+ ctx->next_submit_worker_id = 0;
+ ctx->last_submit_worker_id = 0;
+ ctx->next_output_worker_id = 0;
+ ctx->frame_cache_read = 0;
+ ctx->frame_cache_write = 0;
+ ctx->num_cache_frames = 0;
+ ctx->num_frame_workers =
+ (ctx->frame_parallel_decode == 1) ? ctx->cfg.threads: 1;
+ ctx->available_threads = ctx->num_frame_workers;
+ ctx->flushed = 0;
- ctx->pbi->max_threads = ctx->cfg.threads;
- ctx->pbi->inv_tile_order = ctx->invert_tile_order;
- ctx->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ ctx->buffer_pool = (BufferPool *)vpx_calloc(1, sizeof(BufferPool));
+ if (ctx->buffer_pool == NULL)
+ return VPX_CODEC_MEM_ERROR;
+
+ #if CONFIG_MULTITHREAD
+ if (pthread_mutex_init(&ctx->buffer_pool->pool_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate buffer pool mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ #endif
+
+ ctx->frame_workers = (VP9Worker *)
+ vpx_malloc(ctx->num_frame_workers * sizeof(*ctx->frame_workers));
+ if (ctx->frame_workers == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_workers");
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ for (i = 0; i < ctx->num_frame_workers; ++i) {
+ VP9Worker *const worker = &ctx->frame_workers[i];
+ FrameWorkerData *frame_worker_data = NULL;
+ winterface->init(worker);
+ worker->data1 = vpx_memalign(32, sizeof(FrameWorkerData));
+ if (worker->data1 == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ frame_worker_data = (FrameWorkerData *)worker->data1;
+ frame_worker_data->pbi = vp9_decoder_create(ctx->buffer_pool);
+ if (frame_worker_data->pbi == NULL) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ frame_worker_data->pbi->frame_worker_owner = worker;
- frame_worker_data->pbi->common.mi_idx = 0;
- frame_worker_data->pbi->common.prev_mi_idx = 1;
+ frame_worker_data->worker_id = i;
+ frame_worker_data->scratch_buffer = NULL;
+ frame_worker_data->scratch_buffer_size = 0;
+ frame_worker_data->frame_context_ready = 0;
+ #if CONFIG_MULTITHREAD
+ if (pthread_mutex_init(&frame_worker_data->stats_mutex, NULL)) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data mutex");
+ return VPX_CODEC_MEM_ERROR;
+ }
+
+ if (pthread_cond_init(&frame_worker_data->stats_cond, NULL)) {
+ set_error_detail(ctx, "Failed to allocate frame_worker_data cond");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ #endif
+ // If decoding in serial mode, FrameWorker thread could create tile worker
+ // thread or loopfilter thread.
+ frame_worker_data->pbi->max_threads =
+ (ctx->frame_parallel_decode == 0) ? ctx->cfg.threads : 0;
+
+ frame_worker_data->pbi->inv_tile_order = ctx->invert_tile_order;
+ frame_worker_data->pbi->frame_parallel_decode = ctx->frame_parallel_decode;
+ frame_worker_data->pbi->common.frame_parallel_decode =
+ ctx->frame_parallel_decode;
+ worker->hook = (VP9WorkerHook)frame_worker_hook;
+ if (!winterface->reset(worker)) {
+ set_error_detail(ctx, "Frame Worker thread creation failed");
+ return VPX_CODEC_MEM_ERROR;
+ }
+ }
// If postprocessing was enabled by the application and a
// configuration has not been provided, default it.
static vpx_codec_err_t decode_one(vpx_codec_alg_priv_t *ctx,
const uint8_t **data, unsigned int data_sz,
void *user_priv, int64_t deadline) {
- YV12_BUFFER_CONFIG sd;
- vp9_ppflags_t flags = {0};
+ vp9_ppflags_t flags = {0, 0, 0};
- VP9_COMMON *cm = NULL;
-
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
(void)deadline;
- vp9_zero(sd);
- ctx->img_avail = 0;
-
// Determine the stream parameters. Note that we rely on peek_si to
// validate that we have a buffer that does not wrap around the top
// of the heap.
return VPX_CODEC_OK;
}
-static INLINE uint8_t read_marker(vpx_decrypt_cb decrypt_cb,
- void *decrypt_state,
- const uint8_t *data) {
- if (decrypt_cb) {
- uint8_t marker;
- decrypt_cb(decrypt_state, data, &marker, 1);
- return marker;
- }
- return *data;
-}
-
-static vpx_codec_err_t parse_superframe_index(const uint8_t *data,
- size_t data_sz,
- uint32_t sizes[8], int *count,
- vpx_decrypt_cb decrypt_cb,
- void *decrypt_state) {
- // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
- // it is a super frame index. If the last byte of real video compression
- // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
- // not the associated matching marker byte at the front of the index we have
- // an invalid bitstream and need to return an error.
-
- uint8_t marker;
-
- assert(data_sz);
- marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
- *count = 0;
-
- if ((marker & 0xe0) == 0xc0) {
- const uint32_t frames = (marker & 0x7) + 1;
- const uint32_t mag = ((marker >> 3) & 0x3) + 1;
- const size_t index_sz = 2 + mag * frames;
-
- // This chunk is marked as having a superframe index but doesn't have
- // enough data for it, thus it's an invalid superframe index.
- if (data_sz < index_sz)
- return VPX_CODEC_CORRUPT_FRAME;
-
- {
- const uint8_t marker2 = read_marker(decrypt_cb, decrypt_state,
- data + data_sz - index_sz);
-
- // This chunk is marked as having a superframe index but doesn't have
- // the matching marker byte at the front of the index therefore it's an
- // invalid chunk.
- if (marker != marker2)
- return VPX_CODEC_CORRUPT_FRAME;
- }
-
- {
- // Found a valid superframe index.
- uint32_t i, j;
- const uint8_t *x = &data[data_sz - index_sz + 1];
-
- // Frames has a maximum of 8 and mag has a maximum of 4.
- uint8_t clear_buffer[32];
- assert(sizeof(clear_buffer) >= frames * mag);
- if (decrypt_cb) {
- decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
- x = clear_buffer;
- }
-
- for (i = 0; i < frames; ++i) {
- uint32_t this_sz = 0;
-
- for (j = 0; j < mag; ++j)
- this_sz |= (*x++) << (j * 8);
- sizes[i] = this_sz;
- }
- *count = frames;
- }
- }
- return VPX_CODEC_OK;
-}
-
+ static void wait_worker_and_cache_frame(vpx_codec_alg_priv_t *ctx) {
+ YV12_BUFFER_CONFIG sd;
+ vp9_ppflags_t flags = {0, 0, 0};
+ const VP9WorkerInterface *const winterface = vp9_get_worker_interface();
+ VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
+ ctx->next_output_worker_id =
+ (ctx->next_output_worker_id + 1) % ctx->num_frame_workers;
+ winterface->sync(worker);
+ ++ctx->available_threads;
+ if (vp9_get_raw_frame(frame_worker_data->pbi, &sd, &flags) == 0) {
+ VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
+ ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
+ yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
+ frame_worker_data->user_priv);
+ ctx->frame_cache[ctx->frame_cache_write].img.fb_priv =
+ frame_bufs[cm->new_fb_idx].raw_frame_buffer.priv;
+ ctx->frame_cache_write =
+ (ctx->frame_cache_write + 1) % FRAME_CACHE_SIZE;
+ ++ctx->num_cache_frames;
+ }
+ }
+
static vpx_codec_err_t decoder_decode(vpx_codec_alg_priv_t *ctx,
const uint8_t *data, unsigned int data_sz,
void *user_priv, long deadline) {
// Reset flushed when receiving a valid frame.
ctx->flushed = 0;
- res = parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
- ctx->decrypt_cb, ctx->decrypt_state);
+ // Initialize the decoder workers on the first frame.
+ if (ctx->frame_workers == NULL) {
+ const vpx_codec_err_t res = init_decoder(ctx);
+ if (res != VPX_CODEC_OK)
+ return res;
+ }
+
+ res = vp9_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
+ ctx->decrypt_cb, ctx->decrypt_state);
if (res != VPX_CODEC_OK)
return res;
va_list args) {
vp9_ref_frame_t *data = va_arg(args, vp9_ref_frame_t *);
+ // Only support this function in serial decode.
+ if (ctx->frame_parallel_decode) {
+ set_error_detail(ctx, "Not supported in frame parallel decode");
+ return VPX_CODEC_INCAPABLE;
+ }
+
if (data) {
- YV12_BUFFER_CONFIG* fb = get_ref_frame(&ctx->pbi->common, data->idx);
+ YV12_BUFFER_CONFIG* fb;
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
- vp9_get_reference_dec(frame_worker_data->pbi, data->idx, &fb);
++ fb = get_ref_frame(&frame_worker_data->pbi->common, data->idx);
+ if (fb == NULL) return VPX_CODEC_ERROR;
-
yuvconfig2image(&data->img, fb, NULL);
return VPX_CODEC_OK;
} else {
va_list args) {
int *corrupted = va_arg(args, int *);
- if (corrupted != NULL && ctx->pbi != NULL) {
- const YV12_BUFFER_CONFIG *const frame = ctx->pbi->common.frame_to_show;
- if (frame == NULL) return VPX_CODEC_ERROR;
- *corrupted = frame->corrupted;
+ if (corrupted) {
+ if (ctx->frame_workers) {
+ VP9Worker *const worker = ctx->frame_workers;
+ FrameWorkerData *const frame_worker_data =
+ (FrameWorkerData *)worker->data1;
+ RefCntBuffer *const frame_bufs =
+ frame_worker_data->pbi->common.buffer_pool->frame_bufs;
++ if (frame_worker_data->pbi->common.frame_to_show == NULL)
++ return VPX_CODEC_ERROR;
+ *corrupted = frame_bufs[ctx->last_show_frame].buf.corrupted;
+ } else {
+ return VPX_CODEC_ERROR;
+ }
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
}
}
- if (ctx->pbi) {
- const VP9_COMMON *const cm = &ctx->pbi->common;
+static vpx_codec_err_t ctrl_get_bit_depth(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ unsigned int *const bit_depth = va_arg(args, unsigned int *);
++ VP9Worker *const worker = &ctx->frame_workers[ctx->next_output_worker_id];
+
+ if (bit_depth) {
++ if (worker) {
++ FrameWorkerData *const frame_worker_data =
++ (FrameWorkerData *)worker->data1;
++ const VP9_COMMON *const cm = &frame_worker_data->pbi->common;
+ *bit_depth = cm->bit_depth;
+ return VPX_CODEC_OK;
+ } else {
+ return VPX_CODEC_ERROR;
+ }
+ } else {
+ return VPX_CODEC_INVALID_PARAM;
+ }
+}
+
static vpx_codec_err_t ctrl_set_invert_tile_order(vpx_codec_alg_priv_t *ctx,
va_list args) {
ctx->invert_tile_order = va_arg(args, int);
return VPX_CODEC_OK;
}
- if (ctx->pbi != NULL) {
- VP9_COMMON *const cm = &ctx->pbi->common;
- cm->byte_alignment = byte_alignment;
+static vpx_codec_err_t ctrl_set_byte_alignment(vpx_codec_alg_priv_t *ctx,
+ va_list args) {
+ const int legacy_byte_alignment = 0;
+ const int min_byte_alignment = 32;
+ const int max_byte_alignment = 1024;
+ const int byte_alignment = va_arg(args, int);
+
+ if (byte_alignment != legacy_byte_alignment &&
+ (byte_alignment < min_byte_alignment ||
+ byte_alignment > max_byte_alignment ||
+ (byte_alignment & (byte_alignment - 1)) != 0))
+ return VPX_CODEC_INVALID_PARAM;
+
+ ctx->byte_alignment = byte_alignment;
++ if (ctx->frame_workers) {
++ VP9Worker *const worker = ctx->frame_workers;
++ FrameWorkerData *const frame_worker_data =
++ (FrameWorkerData *)worker->data1;
++ frame_worker_data->pbi->common.byte_alignment = byte_alignment;
+ }
+ return VPX_CODEC_OK;
+}
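+
+// Example: a value of 0 keeps the legacy default, and 64 is accepted as a
+// power of two within [32, 1024], while 48 (not a power of two) and 16
+// (below the minimum) are rejected with VPX_CODEC_INVALID_PARAM.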
+
static vpx_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
{VP8_COPY_REFERENCE, ctrl_copy_reference},
VP9_DX_SRCS-yes += decoder/vp9_read_bit_buffer.h
VP9_DX_SRCS-yes += decoder/vp9_decodemv.h
VP9_DX_SRCS-yes += decoder/vp9_detokenize.h
++VP9_DX_SRCS-yes += decoder/vp9_dthread.c
++VP9_DX_SRCS-yes += decoder/vp9_dthread.h
VP9_DX_SRCS-yes += decoder/vp9_decoder.c
VP9_DX_SRCS-yes += decoder/vp9_decoder.h
VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c