/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 * Based on code from the OggTheora software codec source code,
 * Copyright (C) 2002-2010 The Xiph.Org Foundation and contributors.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "y4minput.h"
#include "vpx/vpx_integer.h"
// Reads 'size' bytes from 'file' into 'buf' with some fault tolerance.
// Returns true on success.
static int file_read(void *buf, size_t size, FILE *file) {
  const int kMaxRetries = 5;
    const size_t n = fread((uint8_t *)buf + len, 1, size - len, file);
    file_error = ferror(file);
      if (errno == EINTR || errno == EAGAIN) {
        fprintf(stderr, "Error reading file: %u of %u bytes read, %d: %s\n",
                (uint32_t)len, (uint32_t)size, errno, strerror(errno));
  } while (!feof(file) && len < size && ++retry_count < kMaxRetries);

  if (!feof(file) && len != size) {
    fprintf(stderr, "Error reading file: %u of %u bytes read,"
            " error: %d, retries: %d, %d: %s\n",
            (uint32_t)len, (uint32_t)size, file_error, retry_count,
            errno, strerror(errno));
static int y4m_parse_tags(y4m_input *_y4m, char *_tags) {
  got_w = got_h = got_fps = got_interlace = got_par = got_chroma = 0;
  for (p = _tags;; p = q) {
    /*Skip any leading spaces.*/
    /*If that's all we have, stop.*/
    if (p[0] == '\0') break;
    /*Find the end of this tag.*/
    for (q = p + 1; *q != '\0' && *q != ' '; q++);
        if (sscanf(p + 1, "%d", &_y4m->pic_w) != 1) return -1;
        if (sscanf(p + 1, "%d", &_y4m->pic_h) != 1) return -1;
        if (sscanf(p + 1, "%d:%d", &_y4m->fps_n, &_y4m->fps_d) != 2) {
        _y4m->interlace = p[1];
        if (sscanf(p + 1, "%d:%d", &_y4m->par_n, &_y4m->par_d) != 2) {
        if (q - p > 16) return -1;
        memcpy(_y4m->chroma_type, p + 1, q - p - 1);
        _y4m->chroma_type[q - p - 1] = '\0';
        /*Ignore unknown tags.*/
  if (!got_w || !got_h || !got_fps) return -1;
  if (!got_interlace) _y4m->interlace = '?';
  if (!got_par) _y4m->par_n = _y4m->par_d = 0;
  /*Chroma-type is not specified in older files, e.g., those generated by
     mplayer.*/
  if (!got_chroma) strcpy(_y4m->chroma_type, "420");
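  /*Illustrative example (not part of the original source): given a stream
    header such as
      YUV4MPEG2 W352 H288 F30000:1001 Ip A128:117 C420jpeg
    the loop above leaves pic_w=352, pic_h=288, fps_n:fps_d=30000:1001,
    interlace='p', par_n:par_d=128:117 and chroma_type="420jpeg"; a header
    without a C tag falls back to "420" as handled just above.*/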
/*All anti-aliasing filters in the following conversion functions are based on
  one of two window functions:

  The 6-tap Lanczos window (for down-sampling and shifts):
   sinc(\pi*t)*sinc(\pi*t/3), |t|<3  (sinc(t)==sin(t)/t)

  The 4-tap Mitchell window (for up-sampling):
   7|t|^3-12|t|^2+16/3,             |t|<1
   -(7/3)|t|^3+12|t|^2-20|t|+32/3,  1<=|t|<2

  The number of taps is intentionally kept small to reduce computational
  overhead and limit ringing.

  The taps from these filters are scaled so that their sum is 1, and the
  result is scaled by 128 and rounded to integers to create a filter whose
  intermediate values fit inside 16 bits.
  Coefficients are rounded in such a way as to ensure their sum is still 128,
  which is usually equivalent to normal rounding.

  Conversions which require both horizontal and vertical filtering could
  have these steps pipelined, for less memory consumption and better cache
  performance, but we do them separately for simplicity.*/
#define OC_MINI(_a, _b) ((_a) > (_b) ? (_b) : (_a))
#define OC_MAXI(_a, _b) ((_a) < (_b) ? (_b) : (_a))
#define OC_CLAMPI(_a, _b, _c) (OC_MAXI(_a, OC_MINI(_b, _c)))
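/*Illustrative sketch (not part of the original source, compiled out): every
  6-tap filter below follows the same pattern of accumulating integer taps
  that sum to 128, adding 64 for rounding, shifting right by 7 and clamping to
  [0, 255]. The hypothetical helper below applies such a tap set to an
  interior sample of a row; the real conversion routines inline this pattern
  and add explicit edge handling.*/
#if 0
static unsigned char apply_6tap_example(const unsigned char *src, int x,
                                        const int taps[6]) {
  /*taps[] is assumed to sum to 128, e.g. {4, -17, 114, 35, -9, 1}.
    The caller must ensure 2 <= x and x + 3 is within the row.*/
  int sum = taps[0] * src[x - 2] + taps[1] * src[x - 1] + taps[2] * src[x] +
            taps[3] * src[x + 1] + taps[4] * src[x + 2] + taps[5] * src[x + 3];
  return (unsigned char)OC_CLAMPI(0, (sum + 64) >> 7, 255);
}
#endif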
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420mpeg2 chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
  the chroma plane's resolution) to the right.
  The 4:2:2 modes look exactly the same, except there are twice as many chroma
  lines, and they are vertically co-sited with the luma samples in both the
  mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
    const unsigned char *_src, int _c_w, int _c_h) {
  for (y = 0; y < _c_h; y++) {
    /*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos
       window.*/
    for (x = 0; x < OC_MINI(_c_w, 2); x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[0] - 17 * _src[OC_MAXI(x - 1, 0)] +
          114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
          _src[OC_MINI(x + 3, _c_w - 1)] + 64) >> 7, 255);
    for (; x < _c_w - 3; x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
          114 * _src[x] + 35 * _src[x + 1] - 9 * _src[x + 2] + _src[x + 3] + 64) >> 7, 255);
    for (; x < _c_w; x++) {
      _dst[x] = (unsigned char)OC_CLAMPI(0, (4 * _src[x - 2] - 17 * _src[x - 1] +
          114 * _src[x] + 35 * _src[OC_MINI(x + 1, _c_w - 1)] - 9 * _src[OC_MINI(x + 2, _c_w - 1)] +
          _src[_c_w - 1] + 64) >> 7, 255);
/*Handles both 422 and 420mpeg2 to 422jpeg and 420jpeg, respectively.*/
static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  for (pli = 1; pli < 3; pli++) {
    y4m_42xmpeg2_42xjpeg_helper(_dst, _aux, c_w, c_h);
/*This format is only used for interlaced content, but is included for
   completeness.

  420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420paldv chroma samples are sited like:
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
  the chroma plane's resolution) to the right.
  Then we use another filter to move the C_r location down one quarter pixel,
  and the C_b location up one quarter pixel.*/
static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + 1) / 2;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*First do the horizontal re-sampling.
       This is the same as the mpeg2 case, except that after the horizontal
       case, we need to apply a second vertical filter.*/
    y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
    /*Slide C_b up a quarter-pel.
       This is the same filter used above, but in the other order.*/
    for (x = 0; x < c_w; x++) {
      for (y = 0; y < OC_MINI(c_h, 3); y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[0]
            - 9 * tmp[OC_MAXI(y - 2, 0) * c_w] + 35 * tmp[OC_MAXI(y - 1, 0) * c_w]
            + 114 * tmp[y * c_w] - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
            + 4 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + 64) >> 7, 255);
      for (; y < c_h - 2; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
            - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
            - 17 * tmp[(y + 1) * c_w] + 4 * tmp[(y + 2) * c_w] + 64) >> 7, 255);
      for (; y < c_h; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (tmp[(y - 3) * c_w]
            - 9 * tmp[(y - 2) * c_w] + 35 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w]
            - 17 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] + 4 * tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
    /*Slide C_r down a quarter-pel.
       This is the same as the horizontal filter.*/
    for (x = 0; x < c_w; x++) {
      for (y = 0; y < OC_MINI(c_h, 2); y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[0]
            - 17 * tmp[OC_MAXI(y - 1, 0) * c_w] + 114 * tmp[y * c_w]
            + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w] - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w]
            + tmp[OC_MINI(y + 3, c_h - 1) * c_w] + 64) >> 7, 255);
      for (; y < c_h - 3; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
            - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[(y + 1) * c_w]
            - 9 * tmp[(y + 2) * c_w] + tmp[(y + 3) * c_w] + 64) >> 7, 255);
      for (; y < c_h; y++) {
        _dst[y * c_w] = (unsigned char)OC_CLAMPI(0, (4 * tmp[(y - 2) * c_w]
            - 17 * tmp[(y - 1) * c_w] + 114 * tmp[y * c_w] + 35 * tmp[OC_MINI(y + 1, c_h - 1) * c_w]
            - 9 * tmp[OC_MINI(y + 2, c_h - 1) * c_w] + tmp[(c_h - 1) * c_w] + 64) >> 7, 255);
  /*For actual interlaced material, this would have to be done separately on
     each field, and the shift amounts would be different.
     C_r moves down 1/8, C_b up 3/8 in the top field, and C_r moves down 3/8,
     C_b up 1/8 in the bottom field.
     The corresponding filters would be:
     Down 1/8 (reverse order for up): [3 -11 125 15 -4 0]/128
     Down 3/8 (reverse order for up): [4 -19 98 56 -13 2]/128*/
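  /*Illustrative check (not part of the original source): both hypothetical
    tap sets above follow the same normalization described at the top of this
    file, i.e. they sum to 128 (3-11+125+15-4+0 == 128 and
    4-19+98+56-13+2 == 128), so the usual (sum + 64) >> 7 rounding would apply
    unchanged.*/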
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
  This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
    const unsigned char *_src, int _c_w, int _c_h) {
  /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
  for (x = 0; x < _c_w; x++) {
    for (y = 0; y < OC_MINI(_c_h, 2); y += 2) {
      _dst[(y >> 1) * _c_w] = OC_CLAMPI(0, (64 * _src[0]
          + 78 * _src[OC_MINI(1, _c_h - 1) * _c_w]
          - 17 * _src[OC_MINI(2, _c_h - 1) * _c_w]
          + 3 * _src[OC_MINI(3, _c_h - 1) * _c_w] + 64) >> 7, 255);
    for (; y < _c_h - 3; y += 2) {
      _dst[(y >> 1) * _c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w] + _src[(y + 3) * _c_w])
          - 17 * (_src[(y - 1) * _c_w] + _src[(y + 2) * _c_w])
          + 78 * (_src[y * _c_w] + _src[(y + 1) * _c_w]) + 64) >> 7, 255);
    for (; y < _c_h; y += 2) {
      _dst[(y >> 1) * _c_w] = OC_CLAMPI(0, (3 * (_src[(y - 2) * _c_w]
          + _src[(_c_h - 1) * _c_w]) - 17 * (_src[(y - 1) * _c_w]
          + _src[OC_MINI(y + 2, _c_h - 1) * _c_w])
          + 78 * (_src[y * _c_w] + _src[OC_MINI(y + 1, _c_h - 1) * _c_w]) + 64) >> 7, 255);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422jpeg chroma samples are sited like:
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------

  We use a resampling filter to decimate the chroma planes by two in the
  vertical direction.*/
static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  for (pli = 1; pli < 3; pli++) {
    y4m_422jpeg_420jpeg_helper(_dst, _aux, c_w, c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422 chroma samples are sited like:
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------

  We use a resampling filter to shift the original site locations one quarter
  pixel (at the original chroma resolution) to the right.
  Then we use a second resampling filter to decimate the chroma planes by two
  in the vertical direction.*/
static void y4m_convert_422_420jpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = c_w * dst_c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*In reality, the horizontal and vertical steps could be pipelined, for
       less memory consumption and better cache performance, but we do them
       separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg).*/
    y4m_42xmpeg2_42xjpeg_helper(tmp, _aux, c_w, c_h);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, c_w, c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  411 chroma samples are sited like:
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------

  We use a filter to resample at site locations one eighth pixel (at the
  source chroma plane's horizontal resolution) and five eighths of a pixel to
  the right.
  Then we use another filter to decimate the planes by 2 in the vertical
  direction.*/
static void y4m_convert_411_420jpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  tmp_sz = dst_c_w * c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*In reality, the horizontal and vertical steps could be pipelined, for
       less memory consumption and better cache performance, but we do them
       separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg).*/
    for (y = 0; y < c_h; y++) {
      /*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
         4-tap Mitchell window.*/
      for (x = 0; x < OC_MINI(c_w, 1); x++) {
        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (111 * _aux[0]
            + 18 * _aux[OC_MINI(1, c_w - 1)] - _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (47 * _aux[0]
            + 86 * _aux[OC_MINI(1, c_w - 1)] - 5 * _aux[OC_MINI(2, c_w - 1)] + 64) >> 7, 255);
      for (; x < c_w - 2; x++) {
        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
            + 18 * _aux[x + 1] - _aux[x + 2] + 64) >> 7, 255);
        tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
            + 86 * _aux[x + 1] - 5 * _aux[x + 2] + 64) >> 7, 255);
      for (; x < c_w; x++) {
        tmp[x << 1] = (unsigned char)OC_CLAMPI(0, (_aux[x - 1] + 110 * _aux[x]
            + 18 * _aux[OC_MINI(x + 1, c_w - 1)] - _aux[c_w - 1] + 64) >> 7, 255);
        if ((x << 1 | 1) < dst_c_w) {
          tmp[x << 1 | 1] = (unsigned char)OC_CLAMPI(0, (-3 * _aux[x - 1] + 50 * _aux[x]
              + 86 * _aux[OC_MINI(x + 1, c_w - 1)] - 5 * _aux[c_w - 1] + 64) >> 7, 255);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
/*Convert 444 to 420jpeg.*/
static void y4m_convert_444_420jpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  /*Skip past the luma data.*/
  _dst += _y4m->pic_w * _y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w = (_y4m->pic_w + _y4m->src_c_dec_h - 1) / _y4m->src_c_dec_h;
  dst_c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  dst_c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  dst_c_sz = dst_c_w * dst_c_h;
  tmp_sz = dst_c_w * c_h;
  tmp = _aux + 2 * c_sz;
  for (pli = 1; pli < 3; pli++) {
    /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
    for (y = 0; y < c_h; y++) {
      for (x = 0; x < OC_MINI(c_w, 2); x += 2) {
        tmp[x >> 1] = OC_CLAMPI(0, (64 * _aux[0] + 78 * _aux[OC_MINI(1, c_w - 1)]
            - 17 * _aux[OC_MINI(2, c_w - 1)]
            + 3 * _aux[OC_MINI(3, c_w - 1)] + 64) >> 7, 255);
      for (; x < c_w - 3; x += 2) {
        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[x + 3])
            - 17 * (_aux[x - 1] + _aux[x + 2]) + 78 * (_aux[x] + _aux[x + 1]) + 64) >> 7, 255);
      for (; x < c_w; x += 2) {
        tmp[x >> 1] = OC_CLAMPI(0, (3 * (_aux[x - 2] + _aux[c_w - 1]) -
            17 * (_aux[x - 1] + _aux[OC_MINI(x + 2, c_w - 1)]) +
            78 * (_aux[x] + _aux[OC_MINI(x + 1, c_w - 1)]) + 64) >> 7, 255);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst, tmp, dst_c_w, c_h);
/*The image is padded with empty chroma components at 4:2:0.*/
static void y4m_convert_mono_420jpeg(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
  _dst += _y4m->pic_w * _y4m->pic_h;
  c_sz = ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
         ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
  memset(_dst, 128, c_sz * 2);
/*No conversion function needed.*/
static void y4m_convert_null(y4m_input *_y4m, unsigned char *_dst,
    unsigned char *_aux) {
int y4m_input_open(y4m_input *_y4m, FILE *_fin, char *_skip, int _nskip,
  char buffer[80] = {0};
  /*Read until newline, or 80 cols, whichever happens first.*/
  for (i = 0; i < 79; i++) {
      buffer[i] = *_skip++;
      if (!file_read(buffer + i, 1, _fin)) return -1;
    if (buffer[i] == '\n') break;
  /*We skipped too much header data.*/
  if (_nskip > 0) return -1;
    fprintf(stderr, "Error parsing header; not a YUV4MPEG2 file?\n");
  if (memcmp(buffer, "YUV4MPEG", 8)) {
    fprintf(stderr, "Incomplete magic for YUV4MPEG file.\n");
  if (buffer[8] != '2') {
    fprintf(stderr, "Incorrect YUV input file version; YUV4MPEG2 required.\n");
  ret = y4m_parse_tags(_y4m, buffer + 5);
    fprintf(stderr, "Error parsing YUV4MPEG2 header.\n");
  if (_y4m->interlace == '?') {
    fprintf(stderr, "Warning: Input video interlacing format unknown; "
            "assuming progressive scan.\n");
  } else if (_y4m->interlace != 'p') {
    fprintf(stderr, "Input video is interlaced; "
            "only progressive scan is handled.\n");
  _y4m->vpx_fmt = VPX_IMG_FMT_I420;
  if (strcmp(_y4m->chroma_type, "420") == 0 ||
      strcmp(_y4m->chroma_type, "420jpeg") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
        + 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "420p10") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 2;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
    _y4m->bit_depth = 10;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
      fprintf(stderr, "Unsupported conversion from 420p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "420p12") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 2;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2));
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
    _y4m->bit_depth = 12;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42016;
      fprintf(stderr, "Unsupported conversion from 420p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "420mpeg2") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
        2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->convert = y4m_convert_42xmpeg2_42xjpeg;
  } else if (strcmp(_y4m->chroma_type, "420paldv") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = _y4m->src_c_dec_v =
        _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
       We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_sz = 3 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * ((_y4m->pic_h + 1) / 2);
    _y4m->convert = y4m_convert_42xpaldv_42xjpeg;
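    /*Worked example (illustrative, not from the original source): for a
      720x576 420paldv input, (pic_w+1)/2 = 360 and (pic_h+1)/2 = 288, so
      aux_buf_read_sz = 2*360*288 = 207360 bytes (the two chroma planes read
      from the file) and aux_buf_sz = 3*360*288 = 311040 bytes; the extra
      plane-sized region holds the intermediate result of the first of the
      two filter passes.*/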
  } else if (strcmp(_y4m->chroma_type, "422jpeg") == 0) {
    _y4m->src_c_dec_h = _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz =
        2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
    _y4m->convert = y4m_convert_422jpeg_420jpeg;
  } else if (strcmp(_y4m->chroma_type, "422") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
         We need to make two filter passes, so we need some extra space in the
         aux buffer.*/
      _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
          ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      _y4m->convert = y4m_convert_422_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_I422;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h
          + 2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "422p10") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
    _y4m->bit_depth = 10;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
        2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h);
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 422p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "422p12") == 0) {
    _y4m->src_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I42216;
    _y4m->bit_depth = 12;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * (_y4m->pic_w * _y4m->pic_h +
        2 * ((_y4m->pic_w + 1) / 2) * _y4m->pic_h);
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 422p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "411") == 0) {
    _y4m->src_c_dec_h = 4;
    _y4m->dst_c_dec_h = 2;
    _y4m->src_c_dec_v = 1;
    _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
       We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_read_sz = 2 * ((_y4m->pic_w + 3) / 4) * _y4m->pic_h;
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
        ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
    _y4m->convert = y4m_convert_411_420jpeg;
  } else if (strcmp(_y4m->chroma_type, "444") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
         We need to make two filter passes, so we need some extra space in the
         aux buffer.*/
      _y4m->aux_buf_read_sz = 2 * _y4m->pic_w * _y4m->pic_h;
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz +
          ((_y4m->pic_w + 1) / 2) * _y4m->pic_h;
      _y4m->convert = y4m_convert_444_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_I444;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "444p10") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
    _y4m->bit_depth = 10;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * 3 * _y4m->pic_w * _y4m->pic_h;
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 444p10 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "444p12") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
    _y4m->vpx_fmt = VPX_IMG_FMT_I44416;
    _y4m->bit_depth = 12;
    _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
    _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
    _y4m->dst_buf_read_sz = 2 * 3 * _y4m->pic_w * _y4m->pic_h;
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_null;
      fprintf(stderr, "Unsupported conversion from 444p12 to 420jpeg\n");
  } else if (strcmp(_y4m->chroma_type, "444alpha") == 0) {
    _y4m->src_c_dec_h = 1;
    _y4m->src_c_dec_v = 1;
      _y4m->dst_c_dec_h = 2;
      _y4m->dst_c_dec_v = 2;
      _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
      /*Chroma filter required: read into the aux buf first.
         We need to make two filter passes, so we need some extra space in the
         aux buffer.
         The extra plane also gets read into the aux buf.
         It will be discarded.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 3 * _y4m->pic_w * _y4m->pic_h;
      _y4m->convert = y4m_convert_444_420jpeg;
      _y4m->vpx_fmt = VPX_IMG_FMT_444A;
      _y4m->dst_c_dec_h = _y4m->src_c_dec_h;
      _y4m->dst_c_dec_v = _y4m->src_c_dec_v;
      _y4m->dst_buf_read_sz = 4 * _y4m->pic_w * _y4m->pic_h;
      /*Natively supported: no conversion required.*/
      _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
      _y4m->convert = y4m_convert_null;
  } else if (strcmp(_y4m->chroma_type, "mono") == 0) {
    _y4m->src_c_dec_h = _y4m->src_c_dec_v = 0;
    _y4m->dst_c_dec_h = _y4m->dst_c_dec_v = 2;
    _y4m->dst_buf_read_sz = _y4m->pic_w * _y4m->pic_h;
    /*No extra space required, but we need to clear the chroma planes.*/
    _y4m->aux_buf_sz = _y4m->aux_buf_read_sz = 0;
    _y4m->convert = y4m_convert_mono_420jpeg;
    fprintf(stderr, "Unknown chroma sampling type: %s\n", _y4m->chroma_type);
  /*The size of the final frame buffers is always computed from the
     destination chroma decimation type.*/
  _y4m->dst_buf_sz = _y4m->pic_w * _y4m->pic_h
      + 2 * ((_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h) *
      ((_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v);
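  /*Illustrative example (not from the original source): for an 8-bit 4:2:0
    destination with pic_w=1920 and pic_h=1080, dst_c_dec_h==dst_c_dec_v==2,
    so dst_buf_sz = 1920*1080 + 2*960*540 = 2073600 + 1036800 = 3110400
    bytes, i.e. one full-resolution luma plane plus two quarter-size chroma
    planes.*/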
  if (_y4m->bit_depth == 8)
    _y4m->dst_buf = (unsigned char *)malloc(_y4m->dst_buf_sz);
  else
    _y4m->dst_buf = (unsigned char *)malloc(2 * _y4m->dst_buf_sz);
  if (_y4m->aux_buf_sz > 0)
    _y4m->aux_buf = (unsigned char *)malloc(_y4m->aux_buf_sz);
void y4m_input_close(y4m_input *_y4m) {
int y4m_input_fetch_frame(y4m_input *_y4m, FILE *_fin, vpx_image_t *_img) {
  int bytes_per_sample = _y4m->bit_depth > 8 ? 2 : 1;
  /*Read and skip the frame header.*/
  if (!file_read(frame, 6, _fin)) return 0;
  if (memcmp(frame, "FRAME", 5)) {
    fprintf(stderr, "Loss of framing in Y4M input data\n");
  if (frame[5] != '\n') {
    for (j = 0; j < 79 && file_read(&c, 1, _fin) && c != '\n'; j++) {}
      fprintf(stderr, "Error parsing Y4M frame header\n");
  /*Read the frame data that needs no conversion.*/
  if (!file_read(_y4m->dst_buf, _y4m->dst_buf_read_sz, _fin)) {
    fprintf(stderr, "Error reading Y4M frame data.\n");
  /*Read the frame data that does need conversion.*/
  if (!file_read(_y4m->aux_buf, _y4m->aux_buf_read_sz, _fin)) {
    fprintf(stderr, "Error reading Y4M frame data.\n");
  /*Now convert the frame we just read.*/
  (*_y4m->convert)(_y4m, _y4m->dst_buf, _y4m->aux_buf);
  /*Fill in the frame buffer pointers.
     We don't use vpx_img_wrap() because it forces padding for odd picture
     sizes, which would require a separate fread call for every row.*/
  memset(_img, 0, sizeof(*_img));
  /*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
  _img->fmt = _y4m->vpx_fmt;
  _img->w = _img->d_w = _y4m->pic_w;
  _img->h = _img->d_h = _y4m->pic_h;
  _img->x_chroma_shift = _y4m->dst_c_dec_h >> 1;
  _img->y_chroma_shift = _y4m->dst_c_dec_v >> 1;
  _img->bps = _y4m->bps;
  /*Set up the buffer pointers.*/
  pic_sz = _y4m->pic_w * _y4m->pic_h * bytes_per_sample;
  c_w = (_y4m->pic_w + _y4m->dst_c_dec_h - 1) / _y4m->dst_c_dec_h;
  c_w *= bytes_per_sample;
  c_h = (_y4m->pic_h + _y4m->dst_c_dec_v - 1) / _y4m->dst_c_dec_v;
  _img->stride[PLANE_Y] = _img->stride[PLANE_ALPHA] =
      _y4m->pic_w * bytes_per_sample;
  _img->stride[PLANE_U] = _img->stride[PLANE_V] = c_w;
  _img->planes[PLANE_Y] = _y4m->dst_buf;
  _img->planes[PLANE_U] = _y4m->dst_buf + pic_sz;
  _img->planes[PLANE_V] = _y4m->dst_buf + pic_sz + c_sz;
  _img->planes[PLANE_ALPHA] = _y4m->dst_buf + pic_sz + 2 * c_sz;
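  /*Illustrative layout (not from the original source): for a 352x288 8-bit
    4:2:0 frame, pic_sz = 352*288 = 101376, c_w = 176, c_h = 144 and
    c_sz = 176*144 = 25344, so planes[PLANE_U] starts at dst_buf + 101376 and
    planes[PLANE_V] at dst_buf + 126720, each with a stride of 176 bytes.*/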