 * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
 * Use of this source code is governed by a BSD-style license and patent
 * grant that can be found in the LICENSE file in the root of the source
 * tree. All contributing project authors may be found in the AUTHORS
 * file in the root of the source tree.
 * Based on code from the OggTheora software codec source code,
 * Copyright (C) 2002-2010 The Xiph.Org Foundation and contributors.
static int y4m_parse_tags(y4m_input *_y4m,char *_tags){
  got_w=got_h=got_fps=got_interlace=got_par=got_chroma=0;
    /*Skip any leading spaces.*/
    /*If that's all we have, stop.*/
    /*Find the end of this tag.*/
    for(q=p+1;*q!='\0'&&*q!=' ';q++);
      if(sscanf(p+1,"%d",&_y4m->pic_w)!=1)return -1;
      if(sscanf(p+1,"%d",&_y4m->pic_h)!=1)return -1;
      if(sscanf(p+1,"%d:%d",&_y4m->fps_n,&_y4m->fps_d)!=2){
      if(sscanf(p+1,"%d:%d",&_y4m->par_n,&_y4m->par_d)!=2){
      memcpy(_y4m->chroma_type,p+1,q-p-1);
      _y4m->chroma_type[q-p-1]='\0';
      /*Ignore unknown tags.*/
  if(!got_w||!got_h||!got_fps)return -1;
  if(!got_interlace)_y4m->interlace='?';
  if(!got_par)_y4m->par_n=_y4m->par_d=0;
  /*Chroma-type is not specified in older files, e.g., those generated by
     mplayer.*/
  if(!got_chroma)strcpy(_y4m->chroma_type,"420");
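
/*Illustrative sketch (hypothetical caller, not part of the converter): the
   text following the "YUV4MPEG2" magic on the stream header line is handed to
   y4m_parse_tags() as-is.
  Assuming a header of
    "YUV4MPEG2 W352 H288 F30000:1001 Ip A128:117 C420jpeg"
   a successful parse leaves pic_w=352, pic_h=288, fps_n=30000, fps_d=1001,
   interlace='p', par_n=128, par_d=117 and chroma_type "420jpeg".*/
static int y4m_parse_tags_example(y4m_input *_y4m){
  char tags[]=" W352 H288 F30000:1001 Ip A128:117 C420jpeg";
  return y4m_parse_tags(_y4m,tags);
}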
/*All anti-aliasing filters in the following conversion functions are based on
   one of two window functions:
  The 6-tap Lanczos window (for down-sampling and shifts):
   sinc(\pi*t)*sinc(\pi*t/3), |t|<3  (sinc(t)==sin(t)/t)
  The 4-tap Mitchell window (for up-sampling):
   7|t|^3-12|t|^2+16/3,             |t|<1
   -(7/3)|t|^3+12|t|^2-20|t|+32/3,  |t|<2
  The number of taps is intentionally kept small to reduce computational
   overhead and limit ringing.

  The taps from these filters are scaled so that their sum is 1, and the result
   is scaled by 128 and rounded to integers to create a filter whose
   intermediate values fit inside 16 bits.
  Coefficients are rounded in such a way as to ensure their sum is still 128,
   which is usually equivalent to normal rounding.

  Conversions which require both horizontal and vertical filtering could
   have these steps pipelined, for less memory consumption and better cache
   performance, but we do them separately for simplicity.*/
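
/*Illustrative sketch of the derivation described above (the helper below is
   an example only and is not used by the converter): evaluate the 6-tap
   Lanczos window at the desired fractional offset, normalize the taps to sum
   to 1, scale by 128, round, and (as one way of meeting the sum-to-128
   constraint mentioned above) fold any rounding residue into the largest tap.
  With _shift=0.25 this reproduces the [4 -17 114 35 -9 1]/128 filter used
   below; the same procedure with the 4-tap Mitchell window at offsets of 1/8
   and 5/8 pel yields the [1 110 18 -1]/128 and [-3 50 86 -5]/128 filters used
   for 4:1:1 up-sampling.*/
#include <math.h>

static void y4m_lanczos_taps_example(double _shift,int _taps[6]){
  static const double PI=3.14159265358979323846;
  double w[6];
  double sum;
  int    isum;
  int    imax;
  int    k;
  sum=0;
  for(k=0;k<6;k++){
    double t;
    /*Tap k weights the sample at offset k-2 from the output site, which lies
       _shift (chroma) pixels to the right of the input site.*/
    t=(k-2)-_shift;
    w[k]=(t==0?1:sin(PI*t)/(PI*t))*(t==0?1:sin(PI*t/3)/(PI*t/3));
    sum+=w[k];
  }
  isum=0;
  imax=0;
  for(k=0;k<6;k++){
    _taps[k]=(int)floor(w[k]*128/sum+0.5);
    isum+=_taps[k];
    if(fabs(w[k])>fabs(w[imax]))imax=k;
  }
  /*Force the integer taps to sum to exactly 128.*/
  _taps[imax]+=128-isum;
}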
#define OC_MINI(_a,_b)      ((_a)>(_b)?(_b):(_a))
#define OC_MAXI(_a,_b)      ((_a)<(_b)?(_b):(_a))
#define OC_CLAMPI(_a,_b,_c) (OC_MAXI(_a,OC_MINI(_b,_c)))
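/*OC_CLAMPI(_a,_b,_c) clamps _b to the range [_a,_c]; as function-like macros,
   all three may evaluate their arguments more than once.*/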
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420mpeg2 chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
   the chroma plane's resolution) to the right.
  The 4:2:2 modes look exactly the same, except there are twice as many chroma
   lines, and they are vertically co-sited with the luma samples in both the
   mpeg2 and jpeg cases (thus requiring no vertical resampling).*/
static void y4m_42xmpeg2_42xjpeg_helper(unsigned char *_dst,
 const unsigned char *_src,int _c_w,int _c_h){
  /*Filter: [4 -17 114 35 -9 1]/128, derived from a 6-tap Lanczos window.*/
  for(x=0;x<OC_MINI(_c_w,2);x++){
    _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[0]-17*_src[OC_MAXI(x-1,0)]+
     114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
     _src[OC_MINI(x+3,_c_w-1)]+64)>>7,255);
    _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
     114*_src[x]+35*_src[x+1]-9*_src[x+2]+_src[x+3]+64)>>7,255);
    _dst[x]=(unsigned char)OC_CLAMPI(0,(4*_src[x-2]-17*_src[x-1]+
     114*_src[x]+35*_src[OC_MINI(x+1,_c_w-1)]-9*_src[OC_MINI(x+2,_c_w-1)]+
     _src[_c_w-1]+64)>>7,255);
/*Handles both 422 and 420mpeg2 to 422jpeg and 420jpeg, respectively.*/
static void y4m_convert_42xmpeg2_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
  c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  for(pli=1;pli<3;pli++){
    y4m_42xmpeg2_42xjpeg_helper(_dst,_aux,c_w,c_h);
/*This format is only used for interlaced content, but is included for
   completeness.

  420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  420paldv chroma samples are sited like:
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------
  YR------Y-------YR------Y-------
  YB------Y-------YB------Y-------

  We use a resampling filter to shift the site locations one quarter pixel (at
   the chroma plane's resolution) to the right.
  Then we use another filter to move the C_r location down one quarter pixel,
   and the C_b location up one quarter pixel.*/
static void y4m_convert_42xpaldv_42xjpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+1)/2;
  c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  for(pli=1;pli<3;pli++){
    /*First do the horizontal re-sampling.
      This is the same as the mpeg2 case, except that after the horizontal
       pass we need to apply a second vertical filter.*/
    y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
    /*Slide C_b up a quarter-pel.
      This is the same filter used above, but in the other order.*/
    for(y=0;y<OC_MINI(c_h,3);y++){
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[0]
       -9*tmp[OC_MAXI(y-2,0)*c_w]+35*tmp[OC_MAXI(y-1,0)*c_w]
       +114*tmp[y*c_w]-17*tmp[OC_MINI(y+1,c_h-1)*c_w]
       +4*tmp[OC_MINI(y+2,c_h-1)*c_w]+64)>>7,255);
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
       -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
       -17*tmp[(y+1)*c_w]+4*tmp[(y+2)*c_w]+64)>>7,255);
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(tmp[(y-3)*c_w]
       -9*tmp[(y-2)*c_w]+35*tmp[(y-1)*c_w]+114*tmp[y*c_w]
       -17*tmp[OC_MINI(y+1,c_h-1)*c_w]+4*tmp[(c_h-1)*c_w]+64)>>7,255);
    /*Slide C_r down a quarter-pel.
      This is the same as the horizontal filter.*/
    for(y=0;y<OC_MINI(c_h,2);y++){
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[0]
       -17*tmp[OC_MAXI(y-1,0)*c_w]+114*tmp[y*c_w]
       +35*tmp[OC_MINI(y+1,c_h-1)*c_w]-9*tmp[OC_MINI(y+2,c_h-1)*c_w]
       +tmp[OC_MINI(y+3,c_h-1)*c_w]+64)>>7,255);
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
       -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[(y+1)*c_w]
       -9*tmp[(y+2)*c_w]+tmp[(y+3)*c_w]+64)>>7,255);
      _dst[y*c_w]=(unsigned char)OC_CLAMPI(0,(4*tmp[(y-2)*c_w]
       -17*tmp[(y-1)*c_w]+114*tmp[y*c_w]+35*tmp[OC_MINI(y+1,c_h-1)*c_w]
       -9*tmp[OC_MINI(y+2,c_h-1)*c_w]+tmp[(c_h-1)*c_w]+64)>>7,255);
  /*For actual interlaced material, this would have to be done separately on
     each field, and the shift amounts would be different.
    C_r moves down 1/8, C_b up 3/8 in the top field, and C_r moves down 3/8,
     C_b up 1/8 in the bottom field.
    The corresponding filters would be:
     Down 1/8 (reverse order for up): [3 -11 125 15 -4 0]/128
     Down 3/8 (reverse order for up): [4 -19 98 56 -13 2]/128*/
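  /*Like the quarter-pel filter above, these taps can be obtained by evaluating
     the same 6-tap Lanczos window at the stated 1/8- and 3/8-pel offsets and
     renormalizing; note that each set still sums to 128
     (3-11+125+15-4+0==128 and 4-19+98+56-13+2==128).*/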
/*Perform vertical filtering to reduce a single plane from 4:2:2 to 4:2:0.
  This is used as a helper by several conversion routines.*/
static void y4m_422jpeg_420jpeg_helper(unsigned char *_dst,
 const unsigned char *_src,int _c_w,int _c_h){
  /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
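  /*Each output row (y>>1) is sited halfway between source rows y and y+1,
     which is why the taps are symmetric; they follow from the same Lanczos
     window evaluated at half-pel offsets, and 3-17+78+78-17+3==128.*/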
  for(y=0;y<OC_MINI(_c_h,2);y+=2){
    _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(64*_src[0]
     +78*_src[OC_MINI(1,_c_h-1)*_c_w]
     -17*_src[OC_MINI(2,_c_h-1)*_c_w]
     +3*_src[OC_MINI(3,_c_h-1)*_c_w]+64)>>7,255);
    _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]+_src[(y+3)*_c_w])
     -17*(_src[(y-1)*_c_w]+_src[(y+2)*_c_w])
     +78*(_src[y*_c_w]+_src[(y+1)*_c_w])+64)>>7,255);
    _dst[(y>>1)*_c_w]=OC_CLAMPI(0,(3*(_src[(y-2)*_c_w]
     +_src[(_c_h-1)*_c_w])-17*(_src[(y-1)*_c_w]
     +_src[OC_MINI(y+2,_c_h-1)*_c_w])
     +78*(_src[y*_c_w]+_src[OC_MINI(y+1,_c_h-1)*_c_w])+64)>>7,255);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422jpeg chroma samples are sited like:
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------
  Y---BR--Y-------Y---BR--Y-------

  We use a resampling filter to decimate the chroma planes by two in the
   vertical direction.*/
static void y4m_convert_422jpeg_420jpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
  dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
  dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  dst_c_sz=dst_c_w*dst_c_h;
  for(pli=1;pli<3;pli++){
    y4m_422jpeg_420jpeg_helper(_dst,_aux,c_w,c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  422 chroma samples are sited like:
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------
  YBR-----Y-------YBR-----Y-------

  We use a resampling filter to shift the original site locations one quarter
   pixel (at the original chroma resolution) to the right.
  Then we use a second resampling filter to decimate the chroma planes by two
   in the vertical direction.*/
static void y4m_convert_422_420jpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
  dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  dst_c_sz=c_w*dst_c_h;
  for(pli=1;pli<3;pli++){
    /*In reality, the horizontal and vertical steps could be pipelined, for
       less memory consumption and better cache performance, but we do them
       separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg).*/
    y4m_42xmpeg2_42xjpeg_helper(tmp,_aux,c_w,c_h);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst,tmp,c_w,c_h);
/*420jpeg chroma samples are sited like:
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------
  Y-------Y-------Y-------Y-------

  411 chroma samples are sited like:
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------
  YBR-----Y-------Y-------Y-------

  We use a filter to resample at site locations one eighth pixel (at the source
   chroma plane's horizontal resolution) and five eighths of a pixel to the
   right.
  Then we use another filter to decimate the planes by 2 in the vertical
   direction.*/
static void y4m_convert_411_420jpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
  dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
  dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  dst_c_sz=dst_c_w*dst_c_h;
  for(pli=1;pli<3;pli++){
    /*In reality, the horizontal and vertical steps could be pipelined, for
       less memory consumption and better cache performance, but we do them
       separately for simplicity.*/
    /*First do horizontal filtering (convert to 422jpeg).*/
    /*Filters: [1 110 18 -1]/128 and [-3 50 86 -5]/128, both derived from a
       4-tap Mitchell window.*/
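    /*These correspond to resampling at the 1/8- and 5/8-pel offsets described
       above, with each set of taps normalized so that it sums to 128
       (1+110+18-1==128 and -3+50+86-5==128).*/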
    for(x=0;x<OC_MINI(c_w,1);x++){
      tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(111*_aux[0]
       +18*_aux[OC_MINI(1,c_w-1)]-_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
      tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(47*_aux[0]
       +86*_aux[OC_MINI(1,c_w-1)]-5*_aux[OC_MINI(2,c_w-1)]+64)>>7,255);
      tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
       +18*_aux[x+1]-_aux[x+2]+64)>>7,255);
      tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
       +86*_aux[x+1]-5*_aux[x+2]+64)>>7,255);
      tmp[x<<1]=(unsigned char)OC_CLAMPI(0,(_aux[x-1]+110*_aux[x]
       +18*_aux[OC_MINI(x+1,c_w-1)]-_aux[c_w-1]+64)>>7,255);
      if((x<<1|1)<dst_c_w){
        tmp[x<<1|1]=(unsigned char)OC_CLAMPI(0,(-3*_aux[x-1]+50*_aux[x]
         +86*_aux[OC_MINI(x+1,c_w-1)]-5*_aux[c_w-1]+64)>>7,255);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
/*Convert 444 to 420jpeg.*/
static void y4m_convert_444_420jpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  /*Skip past the luma data.*/
  _dst+=_y4m->pic_w*_y4m->pic_h;
  /*Compute the size of each chroma plane.*/
  c_w=(_y4m->pic_w+_y4m->src_c_dec_h-1)/_y4m->src_c_dec_h;
  dst_c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
  dst_c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  dst_c_sz=dst_c_w*dst_c_h;
  for(pli=1;pli<3;pli++){
    /*Filter: [3 -17 78 78 -17 3]/128, derived from a 6-tap Lanczos window.*/
    for(x=0;x<OC_MINI(c_w,2);x+=2){
      tmp[x>>1]=OC_CLAMPI(0,(64*_aux[0]+78*_aux[OC_MINI(1,c_w-1)]
       -17*_aux[OC_MINI(2,c_w-1)]
       +3*_aux[OC_MINI(3,c_w-1)]+64)>>7,255);
      tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[x+3])
       -17*(_aux[x-1]+_aux[x+2])+78*(_aux[x]+_aux[x+1])+64)>>7,255);
      tmp[x>>1]=OC_CLAMPI(0,(3*(_aux[x-2]+_aux[c_w-1])-
       17*(_aux[x-1]+_aux[OC_MINI(x+2,c_w-1)])+
       78*(_aux[x]+_aux[OC_MINI(x+1,c_w-1)])+64)>>7,255);
    /*Now do the vertical filtering.*/
    y4m_422jpeg_420jpeg_helper(_dst,tmp,dst_c_w,c_h);
/*The image is padded with empty chroma components at 4:2:0.*/
static void y4m_convert_mono_420jpeg(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
  _dst+=_y4m->pic_w*_y4m->pic_h;
  c_sz=((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
   ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
  memset(_dst,128,c_sz*2);
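  /*128 is the mid-scale (neutral) value for 8-bit Cb and Cr samples.*/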
/*No conversion function needed.*/
static void y4m_convert_null(y4m_input *_y4m,unsigned char *_dst,
 unsigned char *_aux){
int y4m_input_open(y4m_input *_y4m,FILE *_fin,char *_skip,int _nskip){
  /*Read until newline, or 80 cols, whichever happens first.*/
      ret=fread(buffer+i,1,1,_fin);
    if(buffer[i]=='\n')break;
  /*We skipped too much header data.*/
  if(_nskip>0)return -1;
    fprintf(stderr,"Error parsing header; not a YUV4MPEG2 file?\n");
  if(memcmp(buffer,"YUV4MPEG",8)){
    fprintf(stderr,"Incomplete magic for YUV4MPEG file.\n");
    fprintf(stderr,"Incorrect YUV input file version; YUV4MPEG2 required.\n");
  ret=y4m_parse_tags(_y4m,buffer+5);
    fprintf(stderr,"Error parsing YUV4MPEG2 header.\n");
  if(_y4m->interlace=='?'){
    fprintf(stderr,"Warning: Input video interlacing format unknown; "
     "assuming progressive scan.\n");
  }
  else if(_y4m->interlace!='p'){
    fprintf(stderr,"Input video is interlaced; "
     "only progressive scan is handled.\n");
  if(strcmp(_y4m->chroma_type,"420")==0||
   strcmp(_y4m->chroma_type,"420jpeg")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h
     +2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
    /*Natively supported: no conversion required.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_null;
  }
  else if(strcmp(_y4m->chroma_type,"420mpeg2")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=
     2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
    _y4m->convert=y4m_convert_42xmpeg2_42xjpeg;
  }
  else if(strcmp(_y4m->chroma_type,"420paldv")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=_y4m->src_c_dec_v=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_sz=3*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
    _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*((_y4m->pic_h+1)/2);
    _y4m->convert=y4m_convert_42xpaldv_42xjpeg;
  }
  else if(strcmp(_y4m->chroma_type,"422jpeg")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
    _y4m->convert=y4m_convert_422jpeg_420jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"422")==0){
    _y4m->src_c_dec_h=_y4m->dst_c_dec_h=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_read_sz=2*((_y4m->pic_w+1)/2)*_y4m->pic_h;
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
    _y4m->convert=y4m_convert_422_420jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"411")==0){
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_read_sz=2*((_y4m->pic_w+3)/4)*_y4m->pic_h;
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
    _y4m->convert=y4m_convert_411_420jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"444")==0){
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.*/
    _y4m->aux_buf_read_sz=2*_y4m->pic_w*_y4m->pic_h;
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz+((_y4m->pic_w+1)/2)*_y4m->pic_h;
    _y4m->convert=y4m_convert_444_420jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"444alpha")==0){
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*Chroma filter required: read into the aux buf first.
      We need to make two filter passes, so we need some extra space in the
       aux buffer.
      The extra plane also gets read into the aux buf.
      It will be discarded.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=3*_y4m->pic_w*_y4m->pic_h;
    _y4m->convert=y4m_convert_444_420jpeg;
  }
  else if(strcmp(_y4m->chroma_type,"mono")==0){
    _y4m->src_c_dec_h=_y4m->src_c_dec_v=0;
    _y4m->dst_c_dec_h=_y4m->dst_c_dec_v=2;
    _y4m->dst_buf_read_sz=_y4m->pic_w*_y4m->pic_h;
    /*No extra space required, but we need to clear the chroma planes.*/
    _y4m->aux_buf_sz=_y4m->aux_buf_read_sz=0;
    _y4m->convert=y4m_convert_mono_420jpeg;
  }
  else{
    fprintf(stderr,"Unknown chroma sampling type: %s\n",_y4m->chroma_type);
  /*The size of the final frame buffers is always computed from the
     destination chroma decimation type.*/
  _y4m->dst_buf_sz=_y4m->pic_w*_y4m->pic_h
   +2*((_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h)*
   ((_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v);
  _y4m->dst_buf=(unsigned char *)malloc(_y4m->dst_buf_sz);
  _y4m->aux_buf=(unsigned char *)malloc(_y4m->aux_buf_sz);
void y4m_input_close(y4m_input *_y4m){
int y4m_input_fetch_frame(y4m_input *_y4m,FILE *_fin,vpx_image_t *_img){
  /*Read and skip the frame header.*/
  ret=fread(frame,1,6,_fin);
  if(memcmp(frame,"FRAME",5)){
    fprintf(stderr,"Loss of framing in Y4M input data\n");
  for(j=0;j<79&&fread(&c,1,1,_fin)&&c!='\n';j++);
    fprintf(stderr,"Error parsing Y4M frame header\n");
  /*Read the frame data that needs no conversion.*/
  if(fread(_y4m->dst_buf,1,_y4m->dst_buf_read_sz,_fin)!=_y4m->dst_buf_read_sz){
    fprintf(stderr,"Error reading Y4M frame data.\n");
  /*Read the frame data that does need conversion.*/
  if(fread(_y4m->aux_buf,1,_y4m->aux_buf_read_sz,_fin)!=_y4m->aux_buf_read_sz){
    fprintf(stderr,"Error reading Y4M frame data.\n");
  /*Now convert the frame we just read.*/
  (*_y4m->convert)(_y4m,_y4m->dst_buf,_y4m->aux_buf);
  /*Fill in the frame buffer pointers.
    We don't use vpx_img_wrap() because it forces padding for odd picture
     sizes, which would require a separate fread call for every row.*/
  memset(_img,0,sizeof(*_img));
  /*Y4M has the planes in Y'CbCr order, which libvpx calls Y, U, and V.*/
  _img->fmt=IMG_FMT_I420;
  _img->w=_img->d_w=_y4m->pic_w;
  _img->h=_img->d_h=_y4m->pic_h;
  /*This is hard-coded to 4:2:0 for now, as that's all VP8 supports.*/
  _img->x_chroma_shift=1;
  _img->y_chroma_shift=1;
  /*Set up the buffer pointers.*/
  pic_sz=_y4m->pic_w*_y4m->pic_h;
  c_w=(_y4m->pic_w+_y4m->dst_c_dec_h-1)/_y4m->dst_c_dec_h;
  c_h=(_y4m->pic_h+_y4m->dst_c_dec_v-1)/_y4m->dst_c_dec_v;
  _img->stride[PLANE_Y]=_y4m->pic_w;
  _img->stride[PLANE_U]=_img->stride[PLANE_V]=c_w;
  _img->planes[PLANE_Y]=_y4m->dst_buf;
  _img->planes[PLANE_U]=_y4m->dst_buf+pic_sz;
  _img->planes[PLANE_V]=_y4m->dst_buf+pic_sz+c_sz;
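
/*Illustrative usage sketch (hypothetical caller, not part of this file):
   open a Y4M stream, fetch frames into a vpx_image_t while
   y4m_input_fetch_frame() reports success, then close.
  Here no previously read header bytes are handed over via _skip/_nskip, and
   error handling is abbreviated.*/
static int y4m_read_all_frames_example(FILE *_fin){
  y4m_input   y4m;
  vpx_image_t img;
  if(y4m_input_open(&y4m,_fin,NULL,0)<0)return -1;
  while(y4m_input_fetch_frame(&y4m,_fin,&img)>0){
    /*img.planes[] and img.stride[] now describe one 4:2:0 frame held in the
       converter's destination buffer; consume it here.*/
  }
  y4m_input_close(&y4m);
  return 0;
}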